/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
DEFINE_SPINLOCK(hugetlb_lock);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, free the subpool. */
	if (free)
		kfree(spool);
}

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
	struct hugepage_subpool *spool;

	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = nr_blocks;
	spool->used_hpages = 0;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	int ret = 0;

	if (!spool)
		return 0;

	spin_lock(&spool->lock);
	if ((spool->used_hpages + delta) <= spool->max_hpages) {
		spool->used_hpages += delta;
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&spool->lock);

	return ret;
}

static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	if (!spool)
		return;

	spin_lock(&spool->lock);
	spool->used_hpages -= delta;
	/* If hugetlbfs_put_super couldn't free spool due to
	 * an outstanding quota reference, free it now. */
	unlock_or_release_subpool(spool);
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}
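
/*
 * Illustrative sketch (not part of the original file): the subpool
 * lifecycle as hugetlbfs uses it.  A subpool is created per mount,
 * charged/uncharged as huge pages are handed out and returned, and
 * dropped at unmount.  The surrounding caller code here is hypothetical;
 * only the helper names are real.
 *
 *	struct hugepage_subpool *spool = hugepage_new_subpool(max_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	if (hugepage_subpool_get_pages(spool, 1))	// charge one page
 *		goto fail;				// over subpool limit
 *	...						// use the huge page
 *	hugepage_subpool_put_pages(spool, 1);		// return the page
 *	hugepage_put_subpool(spool);	// drop the mount's reference
 */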

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}
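
/*
 * Worked example (illustrative, not part of the original file): reserving
 * huge-page offsets [0, 4) in an empty map and later committing them.
 *
 *	LIST_HEAD(regions);
 *	chg = region_chg(&regions, 0, 4);	// returns 4: all new; records
 *						// a zero-size placeholder
 *	...					// charge/allocate 4 pages
 *	region_add(&regions, 0, 4);		// commit: map now holds [0,4)
 *	region_chg(&regions, 2, 6);		// returns 2: only [4,6) is new
 *	region_count(&regions, 0, 8);		// returns 4 before the second
 *						// change is committed
 */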

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}
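
/*
 * Worked example (illustrative numbers, not from the original file): for a
 * 2 MB hstate, huge_page_shift() is 21 and huge_page_order() is 9.  With
 * vm_start = 0x60000000, vm_pgoff = 512 (file offset 2 MB in 4 kB units)
 * and address = 0x60400000:
 *
 *	(0x60400000 - 0x60000000) >> 21 = 2	// two huge pages into the vma
 *	512 >> 9                        = 1	// vma starts one huge page in
 *
 * so the huge-page cache index returned is 3.
 */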

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping. A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated. A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}
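
/*
 * Illustrative sketch of the encoding above (not part of the original
 * file): because a kmalloc'ed reservation map is at least pointer-aligned,
 * its low two bits are zero and can carry the HPAGE_RESV_* flags alongside
 * the pointer in vm_private_data.
 *
 *	unsigned long v = get_vma_private_data(vma);
 *	struct resv_map *map = (struct resv_map *)(v & ~HPAGE_RESV_MASK);
 *	int is_owner = (v & HPAGE_RESV_OWNER) != 0;
 */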

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_MAYSHARE) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void copy_gigantic_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (!avoid_reserve)
					decrement_hugepage_resv_vma(h, vma);
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;
	return page;

err:
	mpol_cond_put(mpol);
	return NULL;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	VM_BUG_ON(hugetlb_cgroup_from_page(page));
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);

	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));

	spin_lock(&hugetlb_lock);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	hugepage_subpool_put_pages(spool, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);
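
/*
 * Illustrative caller sketch (hypothetical, not part of the original file):
 * code that walks arbitrary pages can use PageHuge() to separate hugetlbfs
 * pages from THP and ordinary compound pages before touching hstate data.
 *
 *	if (PageHuge(page)) {
 *		struct hstate *h = page_hstate(compound_head(page));
 *		// safe to use huge_page_size(h), pages_per_huge_page(h), ...
 *	}
 */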

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	next_nid = start_nid;

	do {
		page = alloc_fresh_huge_page_node(h, next_nid);
		if (page) {
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	} while (next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
		    !list_empty(&h->hugepage_freelists[next_nid])) {
			struct page *page =
				list_entry(h->hugepage_freelists[next_nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[next_nid]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[next_nid]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_free(h, nodes_allowed);
	} while (next_nid != start_nid);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		page = NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		INIT_LIST_HEAD(&page->lru);
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		set_hugetlb_cgroup(page, NULL);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			put_page(page);
		}
	}
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			break;
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * reservation and actually increase subpool usage before an allocation
 * can occur.  Where any new reservation would be required the
 * reservation change is prepared, but not committed.  Once the page
 * has been allocated from the subpool and instantiated the change should
 * be committed via vma_commit_reservation.  No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}
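
/*
 * Illustrative pairing (hypothetical caller, not part of the original
 * file): the two helpers above bracket an allocation, as alloc_huge_page()
 * below does.
 *
 *	chg = vma_needs_reservation(h, vma, addr);	// prepare only
 *	if (chg < 0)
 *		return chg;
 *	...				// charge subpool, allocate the page
 *	vma_commit_reservation(h, vma, addr);	// commit; nothing to undo
 *						// on the failure paths above
 */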

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Processes that did not create the mapping will have no
	 * reserves and will not have accounted against subpool
	 * limit. Check that the subpool limit can be made before
	 * satisfying the allocation.  MAP_NORESERVE mappings may also
	 * need pages and subpool limit allocated if no reserve
	 * mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(-ENOMEM);
	if (chg)
		if (hugepage_subpool_get_pages(spool, chg))
			return ERR_PTR(-ENOSPC);

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret) {
		hugepage_subpool_put_pages(spool, chg);
		return ERR_PTR(-ENOSPC);
	}
	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	if (page) {
		/* update page cgroup details */
		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
					     h_cg, page);
		spin_unlock(&hugetlb_lock);
	} else {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			hugetlb_cgroup_uncharge_cgroup(idx,
						       pages_per_huge_page(h),
						       h_cg);
			hugepage_subpool_put_pages(spool, chg);
			return ERR_PTR(-ENOSPC);
		}
		spin_lock(&hugetlb_lock);
		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
					     h_cg, page);
		list_move(&page->lru, &h->hugepage_activelist);
		spin_unlock(&hugetlb_lock);
	}

	set_page_private(page, (unsigned long)spool);

	vma_commit_reservation(h, vma, addr);
	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_states[N_MEMORY]);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(hstate_next_node_to_alloc(h,
						&node_states[N_MEMORY])),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct hstate *h = m->hstate;
		struct page *page;

#ifdef CONFIG_HIGHMEM
		page = pfn_to_page(m->phys >> PAGE_SHIFT);
		free_bootmem_late((unsigned long)m,
				  sizeof(struct huge_bootmem_page));
#else
		page = virt_to_page(m);
#endif
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side-effects, like CommitLimit going negative.
		 */
		if (h->order > (MAX_ORDER - 1))
			totalram_pages += 1 << h->order;
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_MEMORY]))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}
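
/*
 * Example outputs (illustrative): memfmt(buf, 2UL << 20) yields "2 MB",
 * memfmt(buf, 1UL << 30) yields "1 GB", and anything below 1 MB is shown
 * in KB, so a 64 kB hstate prints as "64 KB".
 */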

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int start_nid, next_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0)
		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	else
		start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		int nid = next_nid;
		if (delta < 0)  {
			/*
			 * To shrink on this node, there must be a surplus page
			 */
			if (!h->surplus_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_alloc(h,
								nodes_allowed);
				continue;
			}
		}
		if (delta > 0) {
			/*
			 * Surplus cannot exceed the total number of pages
			 */
			if (h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_free(h,
								nodes_allowed);
				continue;
			}
		}

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (next_nid != start_nid);

	return ret;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)
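
/*
 * For reference (illustrative expansion): HSTATE_ATTR(nr_hugepages) defines
 * a read-write sysfs attribute wired to the show/store pair of the same
 * name:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 */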

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", nr_huge_pages);
}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
			struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t len)
{
	int err;
	int nid;
	unsigned long count;
	struct hstate *h;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	err = strict_strtoul(buf, 10, &count);
	if (err)
		goto out;

	h = kobj_to_hstate(kobj, &nid);
	if (h->order >= MAX_ORDER) {
		err = -EINVAL;
		goto out;
	}

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_MEMORY];
		}
	} else if (nodes_allowed) {
		/*
		 * per node hstate attribute: adjust count to global,
		 * but restrict alloc/free to the specified node.
		 */
		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		init_nodemask_of_node(nodes_allowed, nid);
	} else
		nodes_allowed = &node_states[N_MEMORY];

	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

	if (nodes_allowed != &node_states[N_MEMORY])
		NODEMASK_FREE(nodes_allowed);

	return len;
out:
	NODEMASK_FREE(nodes_allowed);
	return err;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);
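
/*
 * Usage sketch from userspace (illustrative shell commands): the attribute
 * defined above appears globally and, with CONFIG_NUMA, per node:
 *
 *	echo 20 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 */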

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	if (h->order >= MAX_ORDER)
		return -EINVAL;

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = hstate_index(h);

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					 hstate_kobjs, &hstate_attr_group);
		if (err)
			pr_err("Hugetlb: Unable to add hstate %s", h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node devices
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}

/*
 * Unregister hstate attributes from a single node device.
 * No-op if no hstate attributes attached.
 */
static void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h) {
		int idx = hstate_index(h);
		if (nhs->hstate_kobjs[idx]) {
			kobject_put(nhs->hstate_kobjs[idx]);
			nhs->hstate_kobjs[idx] = NULL;
		}
	}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}

/*
 * hugetlb module exit:  unregister hstate attributes from node devices
 * that have them.
 */
static void hugetlb_unregister_all_nodes(void)
{
	int nid;

	/*
	 * disable node device registrations.
	 */
	register_hugetlbfs_with_node(NULL, NULL);

	/*
	 * remove hstate attributes from any nodes that have them.
	 */
	for (nid = 0; nid < nr_node_ids; nid++)
		hugetlb_unregister_node(node_devices[nid]);
}

/*
 * Register hstate attributes for a single node device.
 * No-op if attributes already registered.
 */
static void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->dev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
				h->name, node->dev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
static void hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		struct node *node = node_devices[nid];
		if (node->dev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node device driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_unregister_all_nodes(void) { }

static void hugetlb_register_all_nodes(void) { }

#endif
1872
1873 static void __exit hugetlb_exit(void)
1874 {
1875 struct hstate *h;
1876
1877 hugetlb_unregister_all_nodes();
1878
1879 for_each_hstate(h) {
1880 kobject_put(hstate_kobjs[hstate_index(h)]);
1881 }
1882
1883 kobject_put(hugepages_kobj);
1884 }
1885 module_exit(hugetlb_exit);
1886
1887 static int __init hugetlb_init(void)
1888 {
1889 /* Some platform decide whether they support huge pages at boot
1890 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1891 * there is no such support
1892 */
1893 if (HPAGE_SHIFT == 0)
1894 return 0;
1895
1896 if (!size_to_hstate(default_hstate_size)) {
1897 default_hstate_size = HPAGE_SIZE;
1898 if (!size_to_hstate(default_hstate_size))
1899 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1900 }
1901 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
1902 if (default_hstate_max_huge_pages)
1903 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1904
1905 hugetlb_init_hstates();
1906 gather_bootmem_prealloc();
1907 report_hugepages();
1908
1909 hugetlb_sysfs_init();
1910 hugetlb_register_all_nodes();
1911 hugetlb_cgroup_file_init();
1912
1913 return 0;
1914 }
1915 module_init(hugetlb_init);
1916
1917 /* Should be called on processing a hugepagesz=... option */
1918 void __init hugetlb_add_hstate(unsigned order)
1919 {
1920 struct hstate *h;
1921 unsigned long i;
1922
1923 if (size_to_hstate(PAGE_SIZE << order)) {
1924 pr_warning("hugepagesz= specified twice, ignoring\n");
1925 return;
1926 }
1927 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
1928 BUG_ON(order == 0);
1929 h = &hstates[hugetlb_max_hstate++];
1930 h->order = order;
1931 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1932 h->nr_huge_pages = 0;
1933 h->free_huge_pages = 0;
1934 for (i = 0; i < MAX_NUMNODES; ++i)
1935 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1936 INIT_LIST_HEAD(&h->hugepage_activelist);
1937 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
1938 h->next_nid_to_free = first_node(node_states[N_MEMORY]);
1939 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1940 huge_page_size(h)/1024);
1941
1942 parsed_hstate = h;
1943 }
1944
1945 static int __init hugetlb_nrpages_setup(char *s)
1946 {
1947 unsigned long *mhp;
1948 static unsigned long *last_mhp;
1949
1950 /*
1951 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
1952 * so this hugepages= parameter goes to the "default hstate".
1953 */
1954 if (!hugetlb_max_hstate)
1955 mhp = &default_hstate_max_huge_pages;
1956 else
1957 mhp = &parsed_hstate->max_huge_pages;
1958
1959 if (mhp == last_mhp) {
1960 pr_warning("hugepages= specified twice without "
1961 "interleaving hugepagesz=, ignoring\n");
1962 return 1;
1963 }
1964
1965 if (sscanf(s, "%lu", mhp) <= 0)
1966 *mhp = 0;
1967
1968 /*
1969 * Global state is always initialized later in hugetlb_init.
1970 * But we need to allocate >= MAX_ORDER hstates here early to still
1971 * use the bootmem allocator.
1972 */
1973 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
1974 hugetlb_hstate_alloc_pages(parsed_hstate);
1975
1976 last_mhp = mhp;
1977
1978 return 1;
1979 }
1980 __setup("hugepages=", hugetlb_nrpages_setup);
1981
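/*
 * Parse the "default_hugepagesz=" boot parameter (e.g.
 * default_hugepagesz=1G); memparse() accepts the usual K/M/G suffixes.
 * The matching hstate is looked up later, in hugetlb_init().
 */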
1982 static int __init hugetlb_default_setup(char *s)
1983 {
1984 default_hstate_size = memparse(s, &s);
1985 return 1;
1986 }
1987 __setup("default_hugepagesz=", hugetlb_default_setup);
1988
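/*
 * Sum a per-node counter array (e.g. free_huge_pages_node[]) over the
 * nodes allowed by the current task's cpuset.
 */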
1989 static unsigned int cpuset_mems_nr(unsigned int *array)
1990 {
1991 int node;
1992 unsigned int nr = 0;
1993
1994 for_each_node_mask(node, cpuset_current_mems_allowed)
1995 nr += array[node];
1996
1997 return nr;
1998 }
1999
2000 #ifdef CONFIG_SYSCTL
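/*
 * Common handler for the /proc/sys/vm/nr_hugepages and
 * nr_hugepages_mempolicy sysctls: reads report the default hstate's
 * max_huge_pages; writes (e.g. "echo 512 > /proc/sys/vm/nr_hugepages")
 * resize the persistent pool via set_max_huge_pages(), optionally
 * restricted to the nodes of the writing task's mempolicy.
 */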
2001 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2002 struct ctl_table *table, int write,
2003 void __user *buffer, size_t *length, loff_t *ppos)
2004 {
2005 struct hstate *h = &default_hstate;
2006 unsigned long tmp;
2007 int ret;
2008
2009 tmp = h->max_huge_pages;
2010
2011 if (write && h->order >= MAX_ORDER)
2012 return -EINVAL;
2013
2014 table->data = &tmp;
2015 table->maxlen = sizeof(unsigned long);
2016 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2017 if (ret)
2018 goto out;
2019
2020 if (write) {
2021 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
2022 GFP_KERNEL | __GFP_NORETRY);
2023 if (!(obey_mempolicy &&
2024 init_nodemask_of_mempolicy(nodes_allowed))) {
2025 NODEMASK_FREE(nodes_allowed);
2026 nodes_allowed = &node_states[N_MEMORY];
2027 }
2028 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2029
2030 if (nodes_allowed != &node_states[N_MEMORY])
2031 NODEMASK_FREE(nodes_allowed);
2032 }
2033 out:
2034 return ret;
2035 }
2036
2037 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2038 void __user *buffer, size_t *length, loff_t *ppos)
2039 {
2040
2041 return hugetlb_sysctl_handler_common(false, table, write,
2042 buffer, length, ppos);
2043 }
2044
2045 #ifdef CONFIG_NUMA
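/*
 * /proc/sys/vm/nr_hugepages_mempolicy: like nr_hugepages, but writes
 * only resize the pool on nodes allowed by the caller's mempolicy.
 */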
2046 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2047 void __user *buffer, size_t *length, loff_t *ppos)
2048 {
2049 return hugetlb_sysctl_handler_common(true, table, write,
2050 buffer, length, ppos);
2051 }
2052 #endif /* CONFIG_NUMA */
2053
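/*
 * /proc/sys/vm/hugepages_treat_as_movable: when set, new huge pages are
 * allocated with __GFP_MOVABLE and so may be placed in ZONE_MOVABLE.
 */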
2054 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2055 void __user *buffer,
2056 size_t *length, loff_t *ppos)
2057 {
2058 proc_dointvec(table, write, buffer, length, ppos);
2059 if (hugepages_treat_as_movable)
2060 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2061 else
2062 htlb_alloc_mask = GFP_HIGHUSER;
2063 return 0;
2064 }
2065
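/*
 * /proc/sys/vm/nr_overcommit_hugepages: cap on the number of surplus
 * huge pages that may be obtained from the buddy allocator beyond the
 * persistent pool.
 */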
2066 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2067 void __user *buffer,
2068 size_t *length, loff_t *ppos)
2069 {
2070 struct hstate *h = &default_hstate;
2071 unsigned long tmp;
2072 int ret;
2073
2074 tmp = h->nr_overcommit_huge_pages;
2075
2076 if (write && h->order >= MAX_ORDER)
2077 return -EINVAL;
2078
2079 table->data = &tmp;
2080 table->maxlen = sizeof(unsigned long);
2081 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2082 if (ret)
2083 goto out;
2084
2085 if (write) {
2086 spin_lock(&hugetlb_lock);
2087 h->nr_overcommit_huge_pages = tmp;
2088 spin_unlock(&hugetlb_lock);
2089 }
2090 out:
2091 return ret;
2092 }
2093
2094 #endif /* CONFIG_SYSCTL */
2095
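/*
 * Emit the default hstate's counters into /proc/meminfo, e.g.
 * (illustrative values only):
 *
 *     HugePages_Total:      64
 *     HugePages_Free:       64
 *     HugePages_Rsvd:        0
 *     HugePages_Surp:        0
 *     Hugepagesize:       2048 kB
 */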
2096 void hugetlb_report_meminfo(struct seq_file *m)
2097 {
2098 struct hstate *h = &default_hstate;
2099 seq_printf(m,
2100 "HugePages_Total: %5lu\n"
2101 "HugePages_Free: %5lu\n"
2102 "HugePages_Rsvd: %5lu\n"
2103 "HugePages_Surp: %5lu\n"
2104 "Hugepagesize: %8lu kB\n",
2105 h->nr_huge_pages,
2106 h->free_huge_pages,
2107 h->resv_huge_pages,
2108 h->surplus_huge_pages,
2109 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2110 }
2111
2112 int hugetlb_report_node_meminfo(int nid, char *buf)
2113 {
2114 struct hstate *h = &default_hstate;
2115 return sprintf(buf,
2116 "Node %d HugePages_Total: %5u\n"
2117 "Node %d HugePages_Free: %5u\n"
2118 "Node %d HugePages_Surp: %5u\n",
2119 nid, h->nr_huge_pages_node[nid],
2120 nid, h->free_huge_pages_node[nid],
2121 nid, h->surplus_huge_pages_node[nid]);
2122 }
2123
2124 void hugetlb_show_meminfo(void)
2125 {
2126 struct hstate *h;
2127 int nid;
2128
2129 for_each_node_state(nid, N_MEMORY)
2130 for_each_hstate(h)
2131 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2132 nid,
2133 h->nr_huge_pages_node[nid],
2134 h->free_huge_pages_node[nid],
2135 h->surplus_huge_pages_node[nid],
2136 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2137 }
2138
2139 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2140 unsigned long hugetlb_total_pages(void)
2141 {
2142 struct hstate *h;
2143 unsigned long nr_total_pages = 0;
2144
2145 for_each_hstate(h)
2146 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2147 return nr_total_pages;
2148 }
2149
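/*
 * Adjust the global hugepage reservation by @delta pages, growing the
 * pool with surplus pages when needed. Returns 0 on success, -ENOMEM
 * if the reservation cannot be backed by free or allocatable pages.
 */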
2150 static int hugetlb_acct_memory(struct hstate *h, long delta)
2151 {
2152 int ret = -ENOMEM;
2153
2154 spin_lock(&hugetlb_lock);
2155 /*
2156 * When cpuset is configured, it breaks the strict hugetlb page
2157 * reservation as the accounting is done on a global variable. Such
2158 * a reservation is completely rubbish in the presence of cpusets
2159 * because the reservation is not checked against page availability
2160 * for the current cpuset. An application can still be OOM killed
2161 * by the kernel for lack of free hugetlb pages in the cpuset the
2162 * task is in. Attempting to enforce strict accounting with cpusets
2163 * is almost impossible (or too ugly) because cpusets are so fluid
2164 * that tasks and memory nodes can be dynamically moved between them.
2165 *
2166 * The change of semantics for shared hugetlb mappings with cpusets
2167 * is undesirable. However, in order to preserve some of the
2168 * semantics, we fall back to checking current free page availability
2169 * as a best attempt, hopefully minimizing the impact of the
2170 * semantic change that cpusets introduce.
2171 */
2172 if (delta > 0) {
2173 if (gather_surplus_pages(h, delta) < 0)
2174 goto out;
2175
2176 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2177 return_unused_surplus_pages(h, delta);
2178 goto out;
2179 }
2180 }
2181
2182 ret = 0;
2183 if (delta < 0)
2184 return_unused_surplus_pages(h, (unsigned long) -delta);
2185
2186 out:
2187 spin_unlock(&hugetlb_lock);
2188 return ret;
2189 }
2190
2191 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2192 {
2193 struct resv_map *reservations = vma_resv_map(vma);
2194
2195 /*
2196 * This new VMA should share its sibling's reservation map if present.
2197 * The VMA will only ever have a valid reservation map pointer where
2198 * it is being copied for another still existing VMA. As that VMA
2199 * has a reference to the reservation map it cannot disappear until
2200 * after this open call completes. It is therefore safe to take a
2201 * new reference here without additional locking.
2202 */
2203 if (reservations)
2204 kref_get(&reservations->refs);
2205 }
2206
2207 static void resv_map_put(struct vm_area_struct *vma)
2208 {
2209 struct resv_map *reservations = vma_resv_map(vma);
2210
2211 if (!reservations)
2212 return;
2213 kref_put(&reservations->refs, resv_map_release);
2214 }
2215
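/*
 * Called when a VMA is torn down: any pages that were reserved for the
 * private mapping but never faulted in are returned to the subpool and
 * the global pool.
 */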
2216 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2217 {
2218 struct hstate *h = hstate_vma(vma);
2219 struct resv_map *reservations = vma_resv_map(vma);
2220 struct hugepage_subpool *spool = subpool_vma(vma);
2221 unsigned long reserve;
2222 unsigned long start;
2223 unsigned long end;
2224
2225 if (reservations) {
2226 start = vma_hugecache_offset(h, vma, vma->vm_start);
2227 end = vma_hugecache_offset(h, vma, vma->vm_end);
2228
2229 reserve = (end - start) -
2230 region_count(&reservations->regions, start, end);
2231
2232 resv_map_put(vma);
2233
2234 if (reserve) {
2235 hugetlb_acct_memory(h, -reserve);
2236 hugepage_subpool_put_pages(spool, reserve);
2237 }
2238 }
2239 }
2240
2241 /*
2242 * We cannot handle pagefaults against hugetlb pages at all. They cause
2243 * handle_mm_fault() to try to instantiate regular-sized pages in the
2244 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
2245 * this far.
2246 */
2247 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2248 {
2249 BUG();
2250 return 0;
2251 }
2252
2253 const struct vm_operations_struct hugetlb_vm_ops = {
2254 .fault = hugetlb_vm_op_fault,
2255 .open = hugetlb_vm_op_open,
2256 .close = hugetlb_vm_op_close,
2257 };
2258
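/*
 * Build a huge PTE for @page: young, huge, and either writable+dirty or
 * write-protected depending on @writable, with protections from the VMA.
 */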
2259 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2260 int writable)
2261 {
2262 pte_t entry;
2263
2264 if (writable) {
2265 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2266 vma->vm_page_prot)));
2267 } else {
2268 entry = huge_pte_wrprotect(mk_huge_pte(page,
2269 vma->vm_page_prot));
2270 }
2271 entry = pte_mkyoung(entry);
2272 entry = pte_mkhuge(entry);
2273 entry = arch_make_huge_pte(entry, vma, page, writable);
2274
2275 return entry;
2276 }
2277
2278 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2279 unsigned long address, pte_t *ptep)
2280 {
2281 pte_t entry;
2282
2283 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2284 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2285 update_mmu_cache(vma, address, ptep);
2286 }
2287
2288
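/*
 * Called at fork() to copy the parent's huge PTEs into the child. For
 * private writable mappings, the source PTE is write-protected first so
 * both copies end up read-only and a later write triggers COW in
 * hugetlb_cow().
 */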
2289 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2290 struct vm_area_struct *vma)
2291 {
2292 pte_t *src_pte, *dst_pte, entry;
2293 struct page *ptepage;
2294 unsigned long addr;
2295 int cow;
2296 struct hstate *h = hstate_vma(vma);
2297 unsigned long sz = huge_page_size(h);
2298
2299 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2300
2301 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2302 src_pte = huge_pte_offset(src, addr);
2303 if (!src_pte)
2304 continue;
2305 dst_pte = huge_pte_alloc(dst, addr, sz);
2306 if (!dst_pte)
2307 goto nomem;
2308
2309 /* If the pagetables are shared don't copy or take references */
2310 if (dst_pte == src_pte)
2311 continue;
2312
2313 spin_lock(&dst->page_table_lock);
2314 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2315 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2316 if (cow)
2317 huge_ptep_set_wrprotect(src, addr, src_pte);
2318 entry = huge_ptep_get(src_pte);
2319 ptepage = pte_page(entry);
2320 get_page(ptepage);
2321 page_dup_rmap(ptepage);
2322 set_huge_pte_at(dst, addr, dst_pte, entry);
2323 }
2324 spin_unlock(&src->page_table_lock);
2325 spin_unlock(&dst->page_table_lock);
2326 }
2327 return 0;
2328
2329 nomem:
2330 return -ENOMEM;
2331 }
2332
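/*
 * The two helpers below classify non-present huge PTEs: a swap-type
 * entry may encode an in-progress migration or a hardware-poisoned page.
 */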
2333 static int is_hugetlb_entry_migration(pte_t pte)
2334 {
2335 swp_entry_t swp;
2336
2337 if (huge_pte_none(pte) || pte_present(pte))
2338 return 0;
2339 swp = pte_to_swp_entry(pte);
2340 if (non_swap_entry(swp) && is_migration_entry(swp))
2341 return 1;
2342 else
2343 return 0;
2344 }
2345
2346 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2347 {
2348 swp_entry_t swp;
2349
2350 if (huge_pte_none(pte) || pte_present(pte))
2351 return 0;
2352 swp = pte_to_swp_entry(pte);
2353 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2354 return 1;
2355 else
2356 return 0;
2357 }
2358
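/*
 * Unmap the huge pages in [start, end). If @ref_page is non-NULL, only
 * that specific page is unmapped (used by unmap_ref_private() when
 * breaking COW on a reserved page).
 */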
2359 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2360 unsigned long start, unsigned long end,
2361 struct page *ref_page)
2362 {
2363 int force_flush = 0;
2364 struct mm_struct *mm = vma->vm_mm;
2365 unsigned long address;
2366 pte_t *ptep;
2367 pte_t pte;
2368 struct page *page;
2369 struct hstate *h = hstate_vma(vma);
2370 unsigned long sz = huge_page_size(h);
2371 const unsigned long mmun_start = start; /* For mmu_notifiers */
2372 const unsigned long mmun_end = end; /* For mmu_notifiers */
2373
2374 WARN_ON(!is_vm_hugetlb_page(vma));
2375 BUG_ON(start & ~huge_page_mask(h));
2376 BUG_ON(end & ~huge_page_mask(h));
2377
2378 tlb_start_vma(tlb, vma);
2379 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2380 again:
2381 spin_lock(&mm->page_table_lock);
2382 for (address = start; address < end; address += sz) {
2383 ptep = huge_pte_offset(mm, address);
2384 if (!ptep)
2385 continue;
2386
2387 if (huge_pmd_unshare(mm, &address, ptep))
2388 continue;
2389
2390 pte = huge_ptep_get(ptep);
2391 if (huge_pte_none(pte))
2392 continue;
2393
2394 /*
2395 * HWPoisoned hugepage is already unmapped and dropped reference
2396 */
2397 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
2398 huge_pte_clear(mm, address, ptep);
2399 continue;
2400 }
2401
2402 page = pte_page(pte);
2403 /*
2404 * If a reference page is supplied, it is because a specific
2405 * page is being unmapped, not a range. Ensure the page we
2406 * are about to unmap is the actual page of interest.
2407 */
2408 if (ref_page) {
2409 if (page != ref_page)
2410 continue;
2411
2412 /*
2413 * Mark the VMA as having unmapped its page so that
2414 * future faults in this VMA will fail rather than
2415 * looking like data was lost
2416 */
2417 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2418 }
2419
2420 pte = huge_ptep_get_and_clear(mm, address, ptep);
2421 tlb_remove_tlb_entry(tlb, ptep, address);
2422 if (huge_pte_dirty(pte))
2423 set_page_dirty(page);
2424
2425 page_remove_rmap(page);
2426 force_flush = !__tlb_remove_page(tlb, page);
2427 if (force_flush)
2428 break;
2429 /* Bail out after unmapping reference page if supplied */
2430 if (ref_page)
2431 break;
2432 }
2433 spin_unlock(&mm->page_table_lock);
2434 /*
2435 * mmu_gather ran out of room to batch pages; we break out of
2436 * the PTE lock to avoid doing the potentially expensive TLB invalidate
2437 * and page-free while holding it.
2438 */
2439 if (force_flush) {
2440 force_flush = 0;
2441 tlb_flush_mmu(tlb);
2442 if (address < end && !ref_page)
2443 goto again;
2444 }
2445 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2446 tlb_end_vma(tlb, vma);
2447 }
2448
2449 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2450 struct vm_area_struct *vma, unsigned long start,
2451 unsigned long end, struct page *ref_page)
2452 {
2453 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2454
2455 /*
2456 * Clear this flag so that x86's huge_pmd_share page_table_shareable
2457 * test will fail on a vma being torn down, and not grab a page table
2458 * on its way out. We're lucky that the flag has such an appropriate
2459 * name, and can in fact be safely cleared here. We could clear it
2460 * before the __unmap_hugepage_range above, but all that's necessary
2461 * is to clear it before releasing the i_mmap_mutex. This works
2462 * because in the context this is called, the VMA is about to be
2463 * destroyed and the i_mmap_mutex is held.
2464 */
2465 vma->vm_flags &= ~VM_MAYSHARE;
2466 }
2467
2468 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2469 unsigned long end, struct page *ref_page)
2470 {
2471 struct mm_struct *mm;
2472 struct mmu_gather tlb;
2473
2474 mm = vma->vm_mm;
2475
2476 tlb_gather_mmu(&tlb, mm, 0);
2477 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2478 tlb_finish_mmu(&tlb, start, end);
2479 }
2480
2481 /*
2482 * This is called when the original mapper is failing to COW a MAP_PRIVATE
2483 * mapping it owns the reserve page for. The intention is to unmap the page
2484 * from other VMAs and let the children be SIGKILLed if they are faulting the
2485 * same region.
2486 */
2487 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2488 struct page *page, unsigned long address)
2489 {
2490 struct hstate *h = hstate_vma(vma);
2491 struct vm_area_struct *iter_vma;
2492 struct address_space *mapping;
2493 pgoff_t pgoff;
2494
2495 /*
2496 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2497 * from page cache lookup which is in HPAGE_SIZE units.
2498 */
2499 address = address & huge_page_mask(h);
2500 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2501 vma->vm_pgoff;
2502 mapping = file_inode(vma->vm_file)->i_mapping;
2503
2504 /*
2505 * Take the mapping lock for the duration of the table walk. As
2506 * this mapping should be shared between all the VMAs,
2507 * __unmap_hugepage_range() is called with the lock already held.
2508 */
2509 mutex_lock(&mapping->i_mmap_mutex);
2510 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2511 /* Do not unmap the current VMA */
2512 if (iter_vma == vma)
2513 continue;
2514
2515 /*
2516 * Unmap the page from other VMAs without their own reserves.
2517 * They get marked to be SIGKILLed if they fault in these
2518 * areas. This is because a future no-page fault on this VMA
2519 * could insert a zeroed page instead of the data existing
2520 * from the time of fork. This would look like data corruption.
2521 */
2522 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2523 unmap_hugepage_range(iter_vma, address,
2524 address + huge_page_size(h), page);
2525 }
2526 mutex_unlock(&mapping->i_mmap_mutex);
2527
2528 return 1;
2529 }
2530
2531 /*
2532 * hugetlb_cow() should be called with the page lock of the original hugepage held.
2533 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2534 * cannot race with other handlers or page migration.
2535 * Keep the pte_same checks anyway to make transition from the mutex easier.
2536 */
2537 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2538 unsigned long address, pte_t *ptep, pte_t pte,
2539 struct page *pagecache_page)
2540 {
2541 struct hstate *h = hstate_vma(vma);
2542 struct page *old_page, *new_page;
2543 int avoidcopy;
2544 int outside_reserve = 0;
2545 unsigned long mmun_start; /* For mmu_notifiers */
2546 unsigned long mmun_end; /* For mmu_notifiers */
2547
2548 old_page = pte_page(pte);
2549
2550 retry_avoidcopy:
2551 /* If no-one else is actually using this page, avoid the copy
2552 * and just make the page writable */
2553 avoidcopy = (page_mapcount(old_page) == 1);
2554 if (avoidcopy) {
2555 if (PageAnon(old_page))
2556 page_move_anon_rmap(old_page, vma, address);
2557 set_huge_ptep_writable(vma, address, ptep);
2558 return 0;
2559 }
2560
2561 /*
2562 * If the process that created a MAP_PRIVATE mapping is about to
2563 * perform a COW due to a shared page count, attempt to satisfy
2564 * the allocation without using the existing reserves. The pagecache
2565 * page is used to determine if the reserve at this address was
2566 * consumed or not. If reserves were used, a partial faulted mapping
2567 * at the time of fork() could consume its reserves on COW instead
2568 * of the full address range.
2569 */
2570 if (!(vma->vm_flags & VM_MAYSHARE) &&
2571 is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2572 old_page != pagecache_page)
2573 outside_reserve = 1;
2574
2575 page_cache_get(old_page);
2576
2577 /* Drop page_table_lock as buddy allocator may be called */
2578 spin_unlock(&mm->page_table_lock);
2579 new_page = alloc_huge_page(vma, address, outside_reserve);
2580
2581 if (IS_ERR(new_page)) {
2582 long err = PTR_ERR(new_page);
2583 page_cache_release(old_page);
2584
2585 /*
2586 * If a process owning a MAP_PRIVATE mapping fails to COW,
2587 * it is due to references held by a child and an insufficient
2588 * huge page pool. To guarantee the original mappers
2589 * reliability, unmap the page from child processes. The child
2590 * may get SIGKILLed if it later faults.
2591 */
2592 if (outside_reserve) {
2593 BUG_ON(huge_pte_none(pte));
2594 if (unmap_ref_private(mm, vma, old_page, address)) {
2595 BUG_ON(huge_pte_none(pte));
2596 spin_lock(&mm->page_table_lock);
2597 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2598 if (likely(pte_same(huge_ptep_get(ptep), pte)))
2599 goto retry_avoidcopy;
2600 /*
2601 * A race occurred while re-acquiring the page_table_lock,
2602 * and our job is done.
2603 */
2604 return 0;
2605 }
2606 WARN_ON_ONCE(1);
2607 }
2608
2609 /* Caller expects lock to be held */
2610 spin_lock(&mm->page_table_lock);
2611 if (err == -ENOMEM)
2612 return VM_FAULT_OOM;
2613 else
2614 return VM_FAULT_SIGBUS;
2615 }
2616
2617 /*
2618 * When the original hugepage is a shared one, it does not have
2619 * an anon_vma prepared.
2620 */
2621 if (unlikely(anon_vma_prepare(vma))) {
2622 page_cache_release(new_page);
2623 page_cache_release(old_page);
2624 /* Caller expects lock to be held */
2625 spin_lock(&mm->page_table_lock);
2626 return VM_FAULT_OOM;
2627 }
2628
2629 copy_user_huge_page(new_page, old_page, address, vma,
2630 pages_per_huge_page(h));
2631 __SetPageUptodate(new_page);
2632
2633 mmun_start = address & huge_page_mask(h);
2634 mmun_end = mmun_start + huge_page_size(h);
2635 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2636 /*
2637 * Retake the page_table_lock to check for racing updates
2638 * before the page tables are altered
2639 */
2640 spin_lock(&mm->page_table_lock);
2641 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2642 if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2643 /* Break COW */
2644 huge_ptep_clear_flush(vma, address, ptep);
2645 set_huge_pte_at(mm, address, ptep,
2646 make_huge_pte(vma, new_page, 1));
2647 page_remove_rmap(old_page);
2648 hugepage_add_new_anon_rmap(new_page, vma, address);
2649 /* Make the old page be freed below */
2650 new_page = old_page;
2651 }
2652 spin_unlock(&mm->page_table_lock);
2653 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2654 /* Caller expects lock to be held */
2655 spin_lock(&mm->page_table_lock);
2656 page_cache_release(new_page);
2657 page_cache_release(old_page);
2658 return 0;
2659 }
2660
2661 /* Return the pagecache page at a given address within a VMA */
2662 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2663 struct vm_area_struct *vma, unsigned long address)
2664 {
2665 struct address_space *mapping;
2666 pgoff_t idx;
2667
2668 mapping = vma->vm_file->f_mapping;
2669 idx = vma_hugecache_offset(h, vma, address);
2670
2671 return find_lock_page(mapping, idx);
2672 }
2673
2674 /*
2675 * Return whether there is a pagecache page to back the given address within the VMA.
2676 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2677 */
2678 static bool hugetlbfs_pagecache_present(struct hstate *h,
2679 struct vm_area_struct *vma, unsigned long address)
2680 {
2681 struct address_space *mapping;
2682 pgoff_t idx;
2683 struct page *page;
2684
2685 mapping = vma->vm_file->f_mapping;
2686 idx = vma_hugecache_offset(h, vma, address);
2687
2688 page = find_get_page(mapping, idx);
2689 if (page)
2690 put_page(page);
2691 return page != NULL;
2692 }
2693
2694 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2695 unsigned long address, pte_t *ptep, unsigned int flags)
2696 {
2697 struct hstate *h = hstate_vma(vma);
2698 int ret = VM_FAULT_SIGBUS;
2699 int anon_rmap = 0;
2700 pgoff_t idx;
2701 unsigned long size;
2702 struct page *page;
2703 struct address_space *mapping;
2704 pte_t new_pte;
2705
2706 /*
2707 * Currently, we are forced to kill the process in the event the
2708 * original mapper has unmapped pages from the child due to a failed
2709 * COW. Warn that such a situation has occurred as it may not be obvious.
2710 */
2711 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2712 pr_warning("PID %d killed due to inadequate hugepage pool\n",
2713 current->pid);
2714 return ret;
2715 }
2716
2717 mapping = vma->vm_file->f_mapping;
2718 idx = vma_hugecache_offset(h, vma, address);
2719
2720 /*
2721 * Use page lock to guard against racing truncation
2722 * before we get page_table_lock.
2723 */
2724 retry:
2725 page = find_lock_page(mapping, idx);
2726 if (!page) {
2727 size = i_size_read(mapping->host) >> huge_page_shift(h);
2728 if (idx >= size)
2729 goto out;
2730 page = alloc_huge_page(vma, address, 0);
2731 if (IS_ERR(page)) {
2732 ret = PTR_ERR(page);
2733 if (ret == -ENOMEM)
2734 ret = VM_FAULT_OOM;
2735 else
2736 ret = VM_FAULT_SIGBUS;
2737 goto out;
2738 }
2739 clear_huge_page(page, address, pages_per_huge_page(h));
2740 __SetPageUptodate(page);
2741
2742 if (vma->vm_flags & VM_MAYSHARE) {
2743 int err;
2744 struct inode *inode = mapping->host;
2745
2746 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2747 if (err) {
2748 put_page(page);
2749 if (err == -EEXIST)
2750 goto retry;
2751 goto out;
2752 }
2753
2754 spin_lock(&inode->i_lock);
2755 inode->i_blocks += blocks_per_huge_page(h);
2756 spin_unlock(&inode->i_lock);
2757 } else {
2758 lock_page(page);
2759 if (unlikely(anon_vma_prepare(vma))) {
2760 ret = VM_FAULT_OOM;
2761 goto backout_unlocked;
2762 }
2763 anon_rmap = 1;
2764 }
2765 } else {
2766 /*
2767 * If a memory error occurs between mmap() and fault, some processes
2768 * don't have a hwpoisoned swap entry for the errored virtual address.
2769 * So we need to block the hugepage fault with a PG_hwpoison bit check.
2770 */
2771 if (unlikely(PageHWPoison(page))) {
2772 ret = VM_FAULT_HWPOISON |
2773 VM_FAULT_SET_HINDEX(hstate_index(h));
2774 goto backout_unlocked;
2775 }
2776 }
2777
2778 /*
2779 * If we are going to COW a private mapping later, we examine the
2780 * pending reservations for this page now. This will ensure that
2781 * any allocations necessary to record that reservation occur outside
2782 * the spinlock.
2783 */
2784 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2785 if (vma_needs_reservation(h, vma, address) < 0) {
2786 ret = VM_FAULT_OOM;
2787 goto backout_unlocked;
2788 }
2789
2790 spin_lock(&mm->page_table_lock);
2791 size = i_size_read(mapping->host) >> huge_page_shift(h);
2792 if (idx >= size)
2793 goto backout;
2794
2795 ret = 0;
2796 if (!huge_pte_none(huge_ptep_get(ptep)))
2797 goto backout;
2798
2799 if (anon_rmap)
2800 hugepage_add_new_anon_rmap(page, vma, address);
2801 else
2802 page_dup_rmap(page);
2803 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2804 && (vma->vm_flags & VM_SHARED)));
2805 set_huge_pte_at(mm, address, ptep, new_pte);
2806
2807 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2808 /* Optimization, do the COW without a second fault */
2809 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2810 }
2811
2812 spin_unlock(&mm->page_table_lock);
2813 unlock_page(page);
2814 out:
2815 return ret;
2816
2817 backout:
2818 spin_unlock(&mm->page_table_lock);
2819 backout_unlocked:
2820 unlock_page(page);
2821 put_page(page);
2822 goto out;
2823 }
2824
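/*
 * Top-level hugetlb page fault handler: waits out migration entries,
 * reports hwpoisoned entries, fills missing PTEs via hugetlb_no_page(),
 * and performs COW for write faults on read-only PTEs.
 */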
2825 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2826 unsigned long address, unsigned int flags)
2827 {
2828 pte_t *ptep;
2829 pte_t entry;
2830 int ret;
2831 struct page *page = NULL;
2832 struct page *pagecache_page = NULL;
2833 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2834 struct hstate *h = hstate_vma(vma);
2835
2836 address &= huge_page_mask(h);
2837
2838 ptep = huge_pte_offset(mm, address);
2839 if (ptep) {
2840 entry = huge_ptep_get(ptep);
2841 if (unlikely(is_hugetlb_entry_migration(entry))) {
2842 migration_entry_wait_huge(mm, ptep);
2843 return 0;
2844 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2845 return VM_FAULT_HWPOISON_LARGE |
2846 VM_FAULT_SET_HINDEX(hstate_index(h));
2847 }
2848
2849 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2850 if (!ptep)
2851 return VM_FAULT_OOM;
2852
2853 /*
2854 * Serialize hugepage allocation and instantiation, so that we don't
2855 * get spurious allocation failures if two CPUs race to instantiate
2856 * the same page in the page cache.
2857 */
2858 mutex_lock(&hugetlb_instantiation_mutex);
2859 entry = huge_ptep_get(ptep);
2860 if (huge_pte_none(entry)) {
2861 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2862 goto out_mutex;
2863 }
2864
2865 ret = 0;
2866
2867 /*
2868 * If we are going to COW the mapping later, we examine the pending
2869 * reservations for this page now. This will ensure that any
2870 * allocations necessary to record that reservation occur outside the
2871 * spinlock. For private mappings, we also lookup the pagecache
2872 * page now as it is used to determine if a reservation has been
2873 * consumed.
2874 */
2875 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
2876 if (vma_needs_reservation(h, vma, address) < 0) {
2877 ret = VM_FAULT_OOM;
2878 goto out_mutex;
2879 }
2880
2881 if (!(vma->vm_flags & VM_MAYSHARE))
2882 pagecache_page = hugetlbfs_pagecache_page(h,
2883 vma, address);
2884 }
2885
2886 /*
2887 * hugetlb_cow() requires page locks of pte_page(entry) and
2888 * pagecache_page, so here we need to take the former one
2889 * when page != pagecache_page or !pagecache_page.
2890 * Note that locking order is always pagecache_page -> page,
2891 * so no worry about deadlock.
2892 */
2893 page = pte_page(entry);
2894 get_page(page);
2895 if (page != pagecache_page)
2896 lock_page(page);
2897
2898 spin_lock(&mm->page_table_lock);
2899 /* Check for a racing update before calling hugetlb_cow */
2900 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2901 goto out_page_table_lock;
2902
2904 if (flags & FAULT_FLAG_WRITE) {
2905 if (!huge_pte_write(entry)) {
2906 ret = hugetlb_cow(mm, vma, address, ptep, entry,
2907 pagecache_page);
2908 goto out_page_table_lock;
2909 }
2910 entry = huge_pte_mkdirty(entry);
2911 }
2912 entry = pte_mkyoung(entry);
2913 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2914 flags & FAULT_FLAG_WRITE))
2915 update_mmu_cache(vma, address, ptep);
2916
2917 out_page_table_lock:
2918 spin_unlock(&mm->page_table_lock);
2919
2920 if (pagecache_page) {
2921 unlock_page(pagecache_page);
2922 put_page(pagecache_page);
2923 }
2924 if (page != pagecache_page)
2925 unlock_page(page);
2926 put_page(page);
2927
2928 out_mutex:
2929 mutex_unlock(&hugetlb_instantiation_mutex);
2930
2931 return ret;
2932 }
2933
2934 /* Can be overridden by architectures */
2935 __attribute__((weak)) struct page *
2936 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2937 pud_t *pud, int write)
2938 {
2939 BUG();
2940 return NULL;
2941 }
2942
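/*
 * get_user_pages() back end for hugetlb VMAs: walk from *position,
 * faulting pages in as needed and filling @pages/@vmas. Returns the
 * number of pages pinned, or -EFAULT if none could be.
 */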
2943 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2944 struct page **pages, struct vm_area_struct **vmas,
2945 unsigned long *position, unsigned long *nr_pages,
2946 long i, unsigned int flags)
2947 {
2948 unsigned long pfn_offset;
2949 unsigned long vaddr = *position;
2950 unsigned long remainder = *nr_pages;
2951 struct hstate *h = hstate_vma(vma);
2952
2953 spin_lock(&mm->page_table_lock);
2954 while (vaddr < vma->vm_end && remainder) {
2955 pte_t *pte;
2956 int absent;
2957 struct page *page;
2958
2959 /*
2960 * Some archs (sparc64, sh*) have multiple pte_ts per
2961 * hugepage. We have to make sure we get the
2962 * first, for the page indexing below to work.
2963 */
2964 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2965 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2966
2967 /*
2968 * When coredumping, it suits get_dump_page if we just return
2969 * an error where there's an empty slot with no huge pagecache
2970 * to back it. This way, we avoid allocating a hugepage, and
2971 * the sparse dumpfile avoids allocating disk blocks, but its
2972 * huge holes still show up with zeroes where they need to be.
2973 */
2974 if (absent && (flags & FOLL_DUMP) &&
2975 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2976 remainder = 0;
2977 break;
2978 }
2979
2980 /*
2981 * We need to call hugetlb_fault() for both hugepages under migration
2982 * (in which case hugetlb_fault() waits for the migration) and
2983 * hwpoisoned hugepages (in which case we need to prevent the
2984 * caller from accessing them). To do this, we use is_swap_pte
2985 * here instead of is_hugetlb_entry_migration and
2986 * is_hugetlb_entry_hwpoisoned, because it simply covers
2987 * both cases, and because we can't follow correct pages
2988 * directly from any kind of swap entry.
2989 */
2990 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
2991 ((flags & FOLL_WRITE) &&
2992 !huge_pte_write(huge_ptep_get(pte)))) {
2993 int ret;
2994
2995 spin_unlock(&mm->page_table_lock);
2996 ret = hugetlb_fault(mm, vma, vaddr,
2997 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2998 spin_lock(&mm->page_table_lock);
2999 if (!(ret & VM_FAULT_ERROR))
3000 continue;
3001
3002 remainder = 0;
3003 break;
3004 }
3005
3006 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3007 page = pte_page(huge_ptep_get(pte));
3008 same_page:
3009 if (pages) {
3010 pages[i] = mem_map_offset(page, pfn_offset);
3011 get_page(pages[i]);
3012 }
3013
3014 if (vmas)
3015 vmas[i] = vma;
3016
3017 vaddr += PAGE_SIZE;
3018 ++pfn_offset;
3019 --remainder;
3020 ++i;
3021 if (vaddr < vma->vm_end && remainder &&
3022 pfn_offset < pages_per_huge_page(h)) {
3023 /*
3024 * We use pfn_offset to avoid touching the pageframes
3025 * of this compound page.
3026 */
3027 goto same_page;
3028 }
3029 }
3030 spin_unlock(&mm->page_table_lock);
3031 *nr_pages = remainder;
3032 *position = vaddr;
3033
3034 return i ? i : -EFAULT;
3035 }
3036
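/*
 * mprotect() helper: rewrite the protections of every huge PTE in
 * [address, end). Returns the number of base pages affected, computed
 * as huge PTEs changed << h->order.
 */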
3037 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3038 unsigned long address, unsigned long end, pgprot_t newprot)
3039 {
3040 struct mm_struct *mm = vma->vm_mm;
3041 unsigned long start = address;
3042 pte_t *ptep;
3043 pte_t pte;
3044 struct hstate *h = hstate_vma(vma);
3045 unsigned long pages = 0;
3046
3047 BUG_ON(address >= end);
3048 flush_cache_range(vma, address, end);
3049
3050 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
3051 spin_lock(&mm->page_table_lock);
3052 for (; address < end; address += huge_page_size(h)) {
3053 ptep = huge_pte_offset(mm, address);
3054 if (!ptep)
3055 continue;
3056 if (huge_pmd_unshare(mm, &address, ptep)) {
3057 pages++;
3058 continue;
3059 }
3060 if (!huge_pte_none(huge_ptep_get(ptep))) {
3061 pte = huge_ptep_get_and_clear(mm, address, ptep);
3062 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3063 pte = arch_make_huge_pte(pte, vma, NULL, 0);
3064 set_huge_pte_at(mm, address, ptep, pte);
3065 pages++;
3066 }
3067 }
3068 spin_unlock(&mm->page_table_lock);
3069 /*
3070 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3071 * may have cleared our pud entry and done put_page on the page table:
3072 * once we release i_mmap_mutex, another task can do the final put_page
3073 * and that page table may then be reused and filled with junk.
3074 */
3075 flush_tlb_range(vma, start, end);
3076 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3077
3078 return pages << h->order;
3079 }
3080
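/*
 * Reserve huge pages for a mapping of [from, to) at mmap() time,
 * charging the hugetlbfs subpool and the global pool up front so that
 * later faults cannot fail for lack of pages.
 */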
3081 int hugetlb_reserve_pages(struct inode *inode,
3082 long from, long to,
3083 struct vm_area_struct *vma,
3084 vm_flags_t vm_flags)
3085 {
3086 long ret, chg;
3087 struct hstate *h = hstate_inode(inode);
3088 struct hugepage_subpool *spool = subpool_inode(inode);
3089
3090 /*
3091 * Only apply hugepage reservation if asked. At fault time, an
3092 * attempt will be made to allocate a page without using reserves
3093 * for VM_NORESERVE mappings.
3094 */
3095 if (vm_flags & VM_NORESERVE)
3096 return 0;
3097
3098 /*
3099 * Shared mappings base their reservation on the number of pages that
3100 * are already allocated on behalf of the file. Private mappings need
3101 * to reserve the full area even if read-only as mprotect() may be
3102 * called to make the mapping read-write. Assume !vma is a shm mapping.
3103 */
3104 if (!vma || vma->vm_flags & VM_MAYSHARE)
3105 chg = region_chg(&inode->i_mapping->private_list, from, to);
3106 else {
3107 struct resv_map *resv_map = resv_map_alloc();
3108 if (!resv_map)
3109 return -ENOMEM;
3110
3111 chg = to - from;
3112
3113 set_vma_resv_map(vma, resv_map);
3114 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3115 }
3116
3117 if (chg < 0) {
3118 ret = chg;
3119 goto out_err;
3120 }
3121
3122 /* There must be enough pages in the subpool for the mapping */
3123 if (hugepage_subpool_get_pages(spool, chg)) {
3124 ret = -ENOSPC;
3125 goto out_err;
3126 }
3127
3128 /*
3129 * Check that enough hugepages are available for the reservation.
3130 * Hand the pages back to the subpool if there are not.
3131 */
3132 ret = hugetlb_acct_memory(h, chg);
3133 if (ret < 0) {
3134 hugepage_subpool_put_pages(spool, chg);
3135 goto out_err;
3136 }
3137
3138 /*
3139 * Account for the reservations made. Shared mappings record regions
3140 * that have reservations as they are shared by multiple VMAs.
3141 * When the last VMA disappears, the region map says how much
3142 * the reservation was and the page cache tells how much of
3143 * the reservation was consumed. Private mappings are per-VMA and
3144 * only the consumed reservations are tracked. When the VMA
3145 * disappears, the original reservation is the VMA size and the
3146 * consumed reservations are stored in the map. Hence, nothing
3147 * else has to be done for private mappings here
3148 */
3149 if (!vma || vma->vm_flags & VM_MAYSHARE)
3150 region_add(&inode->i_mapping->private_list, from, to);
3151 return 0;
3152 out_err:
3153 if (vma)
3154 resv_map_put(vma);
3155 return ret;
3156 }
3157
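/*
 * Undo reservations on truncate: @offset is the new end of the file in
 * huge-page units and @freed is the number of pages actually released.
 */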
3158 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3159 {
3160 struct hstate *h = hstate_inode(inode);
3161 long chg = region_truncate(&inode->i_mapping->private_list, offset);
3162 struct hugepage_subpool *spool = subpool_inode(inode);
3163
3164 spin_lock(&inode->i_lock);
3165 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3166 spin_unlock(&inode->i_lock);
3167
3168 hugepage_subpool_put_pages(spool, (chg - freed));
3169 hugetlb_acct_memory(h, -(chg - freed));
3170 }
3171
3172 #ifdef CONFIG_MEMORY_FAILURE
3173
3174 /* Should be called in hugetlb_lock */
3175 static int is_hugepage_on_freelist(struct page *hpage)
3176 {
3177 struct page *page;
3178 struct page *tmp;
3179 struct hstate *h = page_hstate(hpage);
3180 int nid = page_to_nid(hpage);
3181
3182 list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3183 if (page == hpage)
3184 return 1;
3185 return 0;
3186 }
3187
3188 /*
3189 * This function is called from memory failure code.
3190 * Assume the caller holds page lock of the head page.
3191 */
3192 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3193 {
3194 struct hstate *h = page_hstate(hpage);
3195 int nid = page_to_nid(hpage);
3196 int ret = -EBUSY;
3197
3198 spin_lock(&hugetlb_lock);
3199 if (is_hugepage_on_freelist(hpage)) {
3200 /*
3201 * Hwpoisoned hugepage isn't linked to activelist or freelist,
3202 * but dangling hpage->lru can trigger list-debug warnings
3203 * (this happens when we call unpoison_memory() on it),
3204 * so let it point to itself with list_del_init().
3205 */
3206 list_del_init(&hpage->lru);
3207 set_page_refcounted(hpage);
3208 h->free_huge_pages--;
3209 h->free_huge_pages_node[nid]--;
3210 ret = 0;
3211 }
3212 spin_unlock(&hugetlb_lock);
3213 return ret;
3214 }
3215 #endif