/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/llist.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
static void __vunmap(const void *, int);
static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;

		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}
/*** Page table manipulation functions ***/
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}
static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
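/*
 * Illustrative example (a hypothetical caller, not code from this file):
 * finding the struct page and pfn behind one page of a vmalloc'ed buffer.
 * The buffer name and size below are assumptions for illustration.
 *
 *	void *buf = vmalloc(8 * PAGE_SIZE);
 *	if (buf) {
 *		struct page *pg = vmalloc_to_page(buf + PAGE_SIZE);
 *		unsigned long pfn = vmalloc_to_pfn(buf + PAGE_SIZE);
 *		(pg/pfn describe the physical page backing the second
 *		 virtual page; successive pages are generally not
 *		 physically contiguous)
 *		vfree(buf);
 *	}
 */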
/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}
static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}
static void purge_vmap_area_lazy(void);
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		printk(KERN_WARNING
			"vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}
static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}
/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}
/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}
static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
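/*
 * Worked example (illustrative, assuming 4K pages): with 16 online CPUs,
 * fls(16) = 5, so lazy_max_pages() returns 5 * (32MB / 4KB) = 40960 pages.
 * In other words up to 160MB of lazily-freed virtual address space may
 * accumulate before a purge and its global TLB flush are triggered.
 */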
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}
/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}
/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}
/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}
static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}
/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
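/*
 * Worked example (illustrative): on a 32-bit build with 4K pages and
 * NR_CPUS = 4, VMALLOC_PAGES = 128MB / 4KB = 32768, so the sizing term is
 * 32768 / 4 / 16 = 512 bits. That lies between VMAP_BBMAP_BITS_MIN (64)
 * and VMAP_BBMAP_BITS_MAX (1024), giving VMAP_BBMAP_BITS = 512 and a
 * VMAP_BLOCK_SIZE of 512 * 4KB = 2MB per block.
 */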
static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
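/*
 * Example (illustrative): with VMAP_BLOCK_SIZE = 4MB, an address 6MB past
 * the block-aligned start of the vmalloc space maps to index 1
 * (6MB / 4MB = 1, integer division). Every address inside one block maps
 * to the same index, which is what lets vb_free() look up the owning
 * vmap_block in the radix tree from any address within it.
 */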
static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}
static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}
static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_thiscpu(void)
{
	purge_fragmented_blocks(smp_processor_id());
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;
	int purge = 0;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order) {
			if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
				/* free left too small, handle as fragmented scenario */
				purge = 1;
			}
			goto next;
		}

		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i < 0) {
			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
				/* fragmented and no outstanding allocations */
				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
				purge = 1;
			}
			goto next;
		}
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	if (purge)
		purge_fragmented_blocks_thiscpu();

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}
static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
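/*
 * Example usage (illustrative sketch of a hypothetical driver, not code
 * from this file): transiently mapping a small page array. The page
 * source and count are assumptions for illustration.
 *
 *	struct page *pages[4];
 *	void *va;
 *
 *	... fill pages[0..3] ...
 *	va = vm_map_ram(pages, 4, -1, PAGE_KERNEL);
 *	if (va) {
 *		memset(va, 0, 4 * PAGE_SIZE);
 *		vm_unmap_ram(va, 4);
 *	}
 *
 * Counts up to VMAP_MAX_ALLOC take the fast per-CPU vmap block path;
 * larger counts fall back to a regular vmap area.
 */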
static struct vm_struct *vmlist __initdata;
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}
void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}
/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}
/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}
static void clear_vm_unlist(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNLIST,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNLIST;
}
static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	setup_vmalloc_vm(vm, va, flags, caller);
	clear_vm_unlist(vm);
}
static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	/*
	 * When this function is called from __vmalloc_node_range,
	 * we add VM_UNLIST flag to avoid accessing uninitialized
	 * members of vm_struct such as pages and nr_pages fields.
	 * They will be set later.
	 */
	if (flags & VM_UNLIST)
		setup_vmalloc_vm(area, va, flags, caller);
	else
		insert_vmalloc_vm(area, va, flags, caller);

	return area;
}
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}
/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search for an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}
/**
 *	find_vm_area  -  find a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and return it.
 *	It is up to the caller to do all required locking to keep the returned
 *	pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}
/**
 *	remove_vm_area  -  find and remove a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}
static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}
/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually continuous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in NMI context (strictly speaking, only if we don't
 *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 *	conventions for vfree() arch-dependent would be a really bad idea)
 *
 *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
		llist_add((struct llist_node *)addr, &p->list);
		schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
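/*
 * Illustrative note: because of the deferred path above, callers need not
 * check their context. From interrupt context the address is queued on
 * the per-CPU llist and freed later by free_work() in process context.
 * A hypothetical helper (not code from this file) is safe either way:
 *
 *	static void my_cleanup(void *buf)
 *	{
 *		vfree(buf);
 *	}
 */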
/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
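/*
 * Example usage (illustrative sketch, names hypothetical): mapping a
 * longer-lived page array into one contiguous kernel virtual range.
 *
 *	struct page *pages[16];
 *	void *va;
 *
 *	... obtain 16 pages ...
 *	va = vmap(pages, 16, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		... use the contiguous 16-page mapping ...
 *		vunmap(va);
 *	}
 *
 * Unlike vm_map_ram(), vmap() allocates a vm_struct and is intended for
 * long-lived mappings rather than transient ones.
 */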
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, const void *caller)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;
		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;

		if (node < 0)
			page = alloc_page(tmp_mask);
		else
			page = alloc_pages_node(node, tmp_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}
/**
 *	__vmalloc_node_range  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@start:		vm area range start
 *	@end:		vm area range end
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
				  start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNLIST flag.
	 * It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_unlist(area);

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 3, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}
/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, node, caller);
}
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);
static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
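/*
 * Example (illustrative, names and sizes hypothetical): a large table
 * that only needs to be virtually contiguous, where kmalloc() might fail
 * because of physical fragmentation.
 *
 *	struct entry *tbl = vmalloc(nr * sizeof(*tbl));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	... use tbl ...
 *	vfree(tbl);
 */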
/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *	@size:	allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif
/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     NUMA_NO_NODE, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);
/*
 * Small helper routine: copy contents from addr to buf.
 * If the page is not present, fill with zeroes.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
/**
 *	vread() -  read vmalloc area in a safe way.
 *	@buf:		buffer for reading data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	Returns the number of bytes by which addr and buf should be increased
 *	(same as @count). Returns 0 if [addr...addr+count) doesn't include
 *	any intersection with a live vmalloc area.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from that area to a given buffer. If the given memory range
 *	of [addr...addr+count) includes some valid address, data is copied to
 *	the proper area of @buf. If there are memory holes, they'll be
 *	zero-filled. IOREMAP areas are treated as memory holes and no copy
 *	is done.
 *
 *	If [addr...addr+count) doesn't include any intersection with a live
 *	vm_struct area, returns 0. @buf should be a kernel buffer.
 *
 *	Note: In usual ops, vread() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any information, such as /dev/kmem.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + vm->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + vm->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
/**
 *	vwrite() -  write vmalloc area in a safe way.
 *	@buf:		buffer for source data
 *	@addr:		vm address.
 *	@count:		number of bytes to be written.
 *
 *	Returns the number of bytes by which addr and buf should be increased
 *	(same as @count). If [addr...addr+count) doesn't include any
 *	intersection with a valid vmalloc area, returns 0.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from a buffer to the given addr. If the specified range of
 *	[addr...addr+count) includes some valid address, data is copied from
 *	the proper area of @buf. If there are memory holes, no copy is done
 *	for the holes. IOREMAP areas are treated as memory holes and no copy
 *	is done.
 *
 *	If [addr...addr+count) doesn't include any intersection with a live
 *	vm_struct area, returns 0. @buf should be a kernel buffer.
 *
 *	Note: In usual ops, vwrite() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any information, such as /dev/kmem.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + vm->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + vm->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}
/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criteria isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
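/*
 * Example usage (illustrative sketch): the classic pattern is an mmap
 * file operation exporting a vmalloc_user() buffer to userspace; the
 * function and buffer names below are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_dev->buf, 0);
 *	}
 *
 * This only succeeds for areas carrying VM_USERMAP, i.e. those allocated
 * with vmalloc_user() or vmalloc_32_user().
 */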
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}
/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@ptes:		returns the PTEs for the address space
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.
 *
 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 *	allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
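/*
 * Example usage (illustrative sketch): reserving address space whose PTEs
 * are filled in later by other means, for instance by a hypervisor, as
 * the Xen grant-table code does. Names and count are hypothetical.
 *
 *	pte_t *ptes[NR];
 *	struct vm_struct *vm = alloc_vm_area(NR * PAGE_SIZE, ptes);
 *
 *	if (vm) {
 *		... point ptes[0..NR-1] at the desired frames ...
 *		free_vm_area(vm);
 *	}
 */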
void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}
/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find vmap_areas whose end addresses enclose @end, i.e. if not
 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}
/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
 * down address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside *@pnext vmap_area.  The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}
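/*
 * Worked example (hypothetical values): with align = 0x10000, *pnext
 * starting at 0x12345000 and VMALLOC_END = 0x20000000, the candidate is
 * min(0x12345000 & ~0xffff, 0x20000000) = 0x12340000.  Any *pprev whose
 * va_end lies above that address is shifted into *pnext until the
 * aligned-down address sits between the two areas' end addresses.
 */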
/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for a
 * matching slot.  While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated till all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start) {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				  pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		kfree(vas[area]);
		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}
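/*
 * Illustrative sketch of a caller (hypothetical values; the real user is
 * the percpu allocator): request two congruent 64K areas placed 1M apart
 * within each chunk, so unit offsets stay identical across areas.
 *
 *	static const unsigned long demo_offsets[] = { 0, SZ_1M };
 *	static const size_t demo_sizes[] = { SZ_64K, SZ_64K };
 *	struct vm_struct **demo_vms;
 *
 *	demo_vms = pcpu_get_vm_areas(demo_offsets, demo_sizes, 2, SZ_64K);
 *	if (!demo_vms)
 *		return -ENOMEM;
 *
 * On success demo_vms[1]->addr - demo_vms[0]->addr == SZ_1M, and the
 * whole set is released again with pcpu_free_vm_areas(demo_vms, 2).
 */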
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	loff_t n = *pos;
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = list_entry((&vmap_area_list)->next, typeof(*va), list);
	while (n > 0 && &va->list != &vmap_area_list) {
		n--;
		va = list_entry(va->list.next, typeof(*va), list);
	}
	if (!n && &va->list != &vmap_area_list)
		return va;

	return NULL;
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vmap_area *va = p, *next;

	++*pos;
	next = list_entry(va->list.next, typeof(*va), list);
	if (&next->list != &vmap_area_list)
		return next;

	return NULL;
}
static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
{
	spin_unlock(&vmap_area_lock);
}
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		/* Pair with smp_wmb() in clear_vm_unlist() */
		smp_rmb();
		if (v->flags & VM_UNLIST)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}
static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va = p;
	struct vm_struct *v;

	if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
		return 0;

	if (!(va->flags & VM_VM_AREA)) {
		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
		return 0;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}
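/*
 * For reference, an s_show() line looks roughly like this (addresses and
 * caller hypothetical, and subject to %pK hashing via kptr_restrict):
 *
 *	0xffffc90000000000-0xffffc90000005000   20480 some_caller+0x1a/0x40 pages=4 vmalloc N0=4
 *
 * i.e. start-end, size in bytes (including the guard page), then the
 * optional caller, page count, physical address, flag names and the
 * per-node counts appended by show_numa_info().
 */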
static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (IS_ENABLED(CONFIG_NUMA)) {
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;
	}
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}
static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
void get_vmalloc_info(struct vmalloc_info *vmi)
{
	struct vmap_area *va;
	unsigned long free_area_size;
	unsigned long prev_end;

	vmi->used = 0;
	vmi->largest_chunk = 0;

	prev_end = VMALLOC_START;

	spin_lock(&vmap_area_lock);

	if (list_empty(&vmap_area_list)) {
		vmi->largest_chunk = VMALLOC_TOTAL;
		goto out;
	}

	list_for_each_entry(va, &vmap_area_list, list) {
		unsigned long addr = va->va_start;

		/*
		 * Some archs keep another range for modules in vmalloc space
		 */
		if (addr < VMALLOC_START)
			continue;
		if (addr >= VMALLOC_END)
			break;

		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
			continue;

		vmi->used += (va->va_end - va->va_start);

		free_area_size = addr - prev_end;
		if (vmi->largest_chunk < free_area_size)
			vmi->largest_chunk = free_area_size;

		prev_end = va->va_end;
	}

	if (VMALLOC_END - prev_end > vmi->largest_chunk)
		vmi->largest_chunk = VMALLOC_END - prev_end;

out:
	spin_unlock(&vmap_area_lock);
}
#endif	/* CONFIG_PROC_FS */
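/*
 * Worked example (hypothetical layout): with VMALLOC_START = 0x1000,
 * VMALLOC_END = 0x9000 and live areas at [0x2000,0x3000) and
 * [0x6000,0x7000), get_vmalloc_info() reports used = 0x2000 (the two
 * mapped ranges) and largest_chunk = 0x3000 (the hole between 0x3000
 * and 0x6000; the tail hole 0x7000-0x9000 is only 0x2000).  These feed
 * the VmallocUsed and VmallocChunk fields of /proc/meminfo.
 */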