#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_NO_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_EXCEED_SWAP_PTE,
};
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
/* default scan 8*512 pte (or vmas) every 30 second */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;
/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long pages;
	int err;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);
static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}

static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);
/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}

static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
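
/*
 * Example: with 2M huge pages and 4K base pages, HPAGE_PMD_NR is 512, so
 * the legal range here is 0..511. A value of 0 only collapses ranges that
 * are already fully populated, while the default of HPAGE_PMD_NR - 1
 * allows collapsing around up to 511 unpopulated ptes.
 */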
static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err  = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma become good for khugepaged to scan,
		 * register it here without waiting a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
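
/*
 * Illustrative userspace counterpart: a process opts a range in or out of
 * collapsing with madvise(2), which ends up in the hook above:
 *
 *	madvise(addr, len, MADV_HUGEPAGE);	sets VM_HUGEPAGE
 *	madvise(addr, len, MADV_NOHUGEPAGE);	sets VM_NOHUGEPAGE
 */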
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}
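
/*
 * With 4K base pages and 2M huge pages (HPAGE_PMD_NR == 512) the defaults
 * above work out to: scan up to 4096 ptes per pass, and tolerate up to 511
 * empty ptes and up to 64 swapped-out ptes per collapse candidate.
 */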
void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}
static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}
static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}
static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
		/* khugepaged not yet working on file or special mappings */
		return 0;
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}
static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}
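
/*
 * Check and isolate the HPAGE_PMD_NR ptes starting at @pte: each present
 * page must be a non-pinned, swap-backed anonymous page that can be locked
 * and isolated from the LRU. Returns 1 if the whole range is writable and
 * referenced and may be collapsed; otherwise the pages isolated so far are
 * put back via release_pte_pages() and 0 is returned.
 */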
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0;
	bool referenced = false, writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(PageCompound(page), page);
		VM_BUG_ON_PAGE(!PageAnon(page), page);
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + !!PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing an hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* If there is no mapped pte young don't collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = true;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}
static int khugepaged_node_load[MAX_NUMNODES];
static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If zone_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!zone_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}
/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return GFP_TRANSHUGE | (khugepaged_defrag() ? __GFP_DIRECT_RECLAIM : 0);
}
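
/*
 * When khugepaged_defrag() is enabled, __GFP_DIRECT_RECLAIM is added so the
 * huge page allocation may reclaim/compact and stall; otherwise the
 * allocation fails fast and khugepaged simply retries on a later pass.
 */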
#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
		     nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif
static bool hugepage_vma_check(struct vm_area_struct *vma)
{
	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		return false;
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
}
/*
 * If mmap_sem temporarily dropped, revalidate vma
 * before taking mmap_sem.
 * Return 0 if succeeds, otherwise return a non-zero
 * value (scan code).
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma))
		return SCAN_VMA_CHECK;
	return 0;
}
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd)
{
	pte_t pteval;
	int swapped_in = 0, ret = 0;
	struct fault_env fe = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
	};

	fe.pte = pte_offset_map(pmd, address);
	for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
	     fe.pte++, fe.address += PAGE_SIZE) {
		pteval = *fe.pte;
		if (!is_swap_pte(pteval))
			continue;
		swapped_in++;
		ret = do_swap_page(&fe, pteval);
		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			/* vma is no longer available, don't continue to swapin */
			if (hugepage_vma_revalidate(mm, address))
				return false;
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd)
				return false;
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		fe.pte = pte_offset_map(pmd, fe.address);
	}
	fe.pte--;
	pte_unmap(fe.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1);
	return true;
}
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       struct vm_area_struct *vma,
			       int node)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, release mmap_sem and jump directly out.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false, referenced = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate hugepage from the node that has the
		 * max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + !!PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = true;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_NO_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, vma, node);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	down_read(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		vma = NULL;
	else
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			ret = khugepaged_scan_pmd(mm, vma,
						  khugepaged_scan.address,
						  hpage);
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}
static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}
static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}
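
/*
 * Rough worked example (x86-64, 4K pages, 2M pageblocks, so
 * pageblock_nr_pages == 512 and MIGRATE_PCPTYPES == 3): the two terms
 * above request (2 + 3 * 3) * 512 pages per populated zone, roughly 22MB,
 * before the 5%-of-lowmem cap is applied.
 */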
int start_stop_khugepaged(void)
{
	static struct task_struct *khugepaged_thread __read_mostly;
	static DEFINE_MUTEX(khugepaged_mutex);
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}