/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long addr, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(addr, end);

	if (unlikely(err))
		return err;
	return nr;
}
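
/*
 * Worked example of the contract above (illustrative values, assuming
 * 4K pages): vmap_page_range(0xf0000000, 0xf0003000, PAGE_KERNEL, pages)
 * installs three ptes so that 0xf0000000 maps pages[0], 0xf0001000 maps
 * pages[1] and 0xf0002000 maps pages[2], and returns nr == 3.
 */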

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_addr(vmalloc_addr) &&
			!is_module_address(addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
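
/*
 * Illustrative usage sketch (not part of this file; "buf" is a
 * hypothetical caller-owned pointer):
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	if (buf) {
 *		struct page *page = vmalloc_to_page(buf);
 *		unsigned long pfn = vmalloc_to_pfn(buf);
 *		printk(KERN_DEBUG "buf backed by page %p (pfn %lu)\n",
 *			page, pfn);
 *		vfree(buf);
 *	}
 */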

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(size & ~PAGE_MASK);

	addr = ALIGN(vstart, align);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	spin_lock(&vmap_area_lock);
	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		/* from the starting point, walk areas until a suitable hole is found */
		while (addr + size >= first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING "vmap allocation failed: "
				"use vmalloc=<size> to increase size.\n");
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned long log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
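
/*
 * Worked example of the log scale above (illustrative, assuming 4K
 * pages): 32MB is 8192 pages, and on a 16-CPU machine fls(16) == 5, so
 * lazy_max_pages() returns 5 * 8192 == 40960 pages; up to 160MB of
 * lazily freed kva may accumulate before a purge is forced.
 */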

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry(va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		purge_vmap_area_lazy();
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
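
/*
 * Worked example (illustrative): on 32-bit with 4K pages and NR_CPUS=4,
 * VMALLOC_PAGES = 128MB/4K = 32768, so VMALLOC_PAGES / NR_CPUS / 16 = 512.
 * Clamped to [VMAP_BBMAP_BITS_MIN=64, VMAP_BBMAP_BITS_MAX=1024] that
 * leaves VMAP_BBMAP_BITS = 512, and VMAP_BLOCK_SIZE = 512 * 4K = 2MB.
 */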

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);

		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
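
/*
 * Illustrative sketch (an assumption, not from this file): a caller
 * that is about to hand pages to hardware, or change their memory
 * attributes, might flush stale kernel aliases first:
 *
 *	vm_unmap_aliases();
 *	... now no lazily-unmapped kva alias of those pages remains ...
 */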

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 * @returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
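
/*
 * Illustrative pairing of the two calls above (a sketch, not from this
 * file; the 4-entry "pages" array is assumed to have been allocated and
 * filled elsewhere):
 *
 *	void *p = vm_map_ram(pages, 4, -1, PAGE_KERNEL);
 *	if (p) {
 *		memset(p, 0, 4 * PAGE_SIZE);
 *		vm_unmap_ram(p, 4);
 *	}
 */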

void __init vmalloc_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
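
/*
 * Illustrative sketch (not part of this file): mapping two discontiguous
 * pages into one contiguous virtual range; "page0" and "page1" are
 * hypothetical, previously allocated pages.
 *
 *	struct page *pages[2] = { page0, page1 };
 *	void *p = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *	if (p) {
 *		... use 2 * PAGE_SIZE bytes at p ...
 *		vunmap(p);
 *	}
 */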

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
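
/*
 * Illustrative usage (a sketch, not from this file): a large, virtually
 * contiguous buffer that need not be physically contiguous.
 *
 *	char *buf = vmalloc(64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	... use buf ...
 *	vfree(buf);
 */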

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
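
/*
 * Illustrative sketch of the intended pairing with vmalloc_user()
 * (an assumption, not from this file; "foo_buf" and "foo_mmap" are
 * hypothetical driver names):
 *
 *	static void *foo_buf;	allocated earlier with vmalloc_user()
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *	}
 */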

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size:	size of the area
 *
 * Returns:	NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
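
/*
 * Illustrative usage (a sketch, not from this file): reserve kva with
 * populated page tables, to be filled in by other means, then release it.
 *
 *	struct vm_struct *vm = alloc_vm_area(PAGE_SIZE);
 *	if (!vm)
 *		return -ENOMEM;
 *	... install mappings at vm->addr by other means ...
 *	free_vm_area(vm);
 */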

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[2 * KSYM_NAME_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
#endif