mm: use put_page() to free page instead of putback_lru_page()
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] mm/migrate.c
b20a3503 1/*
14e0f9bc 2 * Memory Migration functionality - linux/mm/migrate.c
b20a3503
CL
3 *
4 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
5 *
6 * Page migration was first developed in the context of the memory hotplug
7 * project. The main authors of the migration code are:
8 *
9 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
10 * Hirokazu Takahashi <taka@valinux.co.jp>
11 * Dave Hansen <haveblue@us.ibm.com>
cde53535 12 * Christoph Lameter
b20a3503
CL
13 */
14
15#include <linux/migrate.h>
b95f1b31 16#include <linux/export.h>
b20a3503 17#include <linux/swap.h>
0697212a 18#include <linux/swapops.h>
b20a3503 19#include <linux/pagemap.h>
e23ca00b 20#include <linux/buffer_head.h>
b20a3503 21#include <linux/mm_inline.h>
b488893a 22#include <linux/nsproxy.h>
b20a3503 23#include <linux/pagevec.h>
e9995ef9 24#include <linux/ksm.h>
b20a3503
CL
25#include <linux/rmap.h>
26#include <linux/topology.h>
27#include <linux/cpu.h>
28#include <linux/cpuset.h>
04e62a29 29#include <linux/writeback.h>
742755a1
CL
30#include <linux/mempolicy.h>
31#include <linux/vmalloc.h>
86c3a764 32#include <linux/security.h>
42cb14b1 33#include <linux/backing-dev.h>
4f5ca265 34#include <linux/syscalls.h>
290408d4 35#include <linux/hugetlb.h>
8e6ac7fa 36#include <linux/hugetlb_cgroup.h>
5a0e3ad6 37#include <linux/gfp.h>
bf6bddf1 38#include <linux/balloon_compaction.h>
f714f4f2 39#include <linux/mmu_notifier.h>
33c3fc71 40#include <linux/page_idle.h>
d435edca 41#include <linux/page_owner.h>
b20a3503 42
0d1836c3
MN
43#include <asm/tlbflush.h>
44
7b2a2d4a
MG
45#define CREATE_TRACE_POINTS
46#include <trace/events/migrate.h>
47
b20a3503
CL
48#include "internal.h"
49
b20a3503 50/*
742755a1 51 * migrate_prep() needs to be called before we start compiling a list of pages
748446bb
MG
52 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
53 * undesirable, use migrate_prep_local()
b20a3503
CL
54 */
55int migrate_prep(void)
56{
b20a3503
CL
57 /*
58 * Clear the LRU lists so pages can be isolated.
59 * Note that pages may be moved off the LRU after we have
60 * drained them. Those pages will fail to migrate like other
61 * pages that may be busy.
62 */
63 lru_add_drain_all();
64
65 return 0;
66}
67
748446bb
MG
68/* Do the necessary work of migrate_prep but not if it involves other CPUs */
69int migrate_prep_local(void)
70{
71 lru_add_drain();
72
73 return 0;
74}
75
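/*
 * Illustrative sketch, not part of migrate.c: how a caller typically strings
 * these primitives together.  demo_new_page() and demo_migrate_one() are
 * hypothetical names; real callers include compaction, memory hotplug and
 * the move_pages() syscall further down.  The caller must already hold a
 * reference on @page before isolating it.
 */
static struct page *demo_new_page(struct page *page, unsigned long private,
				  int **result)
{
	/* Allocate the migration target on the node passed via @private. */
	return __alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
}

static int demo_migrate_one(struct page *page, int target_nid)
{
	LIST_HEAD(pagelist);
	int err;

	migrate_prep();			/* drain LRU caches before isolating */
	if (isolate_lru_page(page))
		return -EBUSY;
	inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
	list_add_tail(&page->lru, &pagelist);

	err = migrate_pages(&pagelist, demo_new_page, NULL,
			    (unsigned long)target_nid, MIGRATE_SYNC,
			    MR_SYSCALL);
	if (err)
		putback_movable_pages(&pagelist);	/* re-add the leftovers */
	return err;
}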
5733c7d1
RA
76/*
77 * Put previously isolated pages back onto the appropriate lists
78 * from where they were once taken off for compaction/migration.
79 *
59c82b70
JK
80 * This function shall be used whenever the isolated pageset has been
81 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
82 * and isolate_huge_page().
5733c7d1
RA
83 */
84void putback_movable_pages(struct list_head *l)
85{
86 struct page *page;
87 struct page *page2;
88
b20a3503 89 list_for_each_entry_safe(page, page2, l, lru) {
31caf665
NH
90 if (unlikely(PageHuge(page))) {
91 putback_active_hugepage(page);
92 continue;
93 }
e24f0b8f 94 list_del(&page->lru);
a731286d 95 dec_zone_page_state(page, NR_ISOLATED_ANON +
6c0b1351 96 page_is_file_cache(page));
117aad1e 97 if (unlikely(isolated_balloon_page(page)))
bf6bddf1
RA
98 balloon_page_putback(page);
99 else
100 putback_lru_page(page);
b20a3503 101 }
b20a3503
CL
102}
103
0697212a
CL
104/*
105 * Restore a potential migration pte to a working pte entry
106 */
e9995ef9
HD
107static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
108 unsigned long addr, void *old)
0697212a
CL
109{
110 struct mm_struct *mm = vma->vm_mm;
111 swp_entry_t entry;
0697212a
CL
112 pmd_t *pmd;
113 pte_t *ptep, pte;
114 spinlock_t *ptl;
115
290408d4
NH
116 if (unlikely(PageHuge(new))) {
117 ptep = huge_pte_offset(mm, addr);
118 if (!ptep)
119 goto out;
cb900f41 120 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
290408d4 121 } else {
6219049a
BL
122 pmd = mm_find_pmd(mm, addr);
123 if (!pmd)
290408d4 124 goto out;
0697212a 125
290408d4 126 ptep = pte_offset_map(pmd, addr);
0697212a 127
486cf46f
HD
128 /*
129 * Peek to check is_swap_pte() before taking ptlock? No, we
130 * can race mremap's move_ptes(), which skips anon_vma lock.
131 */
290408d4
NH
132
133 ptl = pte_lockptr(mm, pmd);
134 }
0697212a 135
0697212a
CL
136 spin_lock(ptl);
137 pte = *ptep;
138 if (!is_swap_pte(pte))
e9995ef9 139 goto unlock;
0697212a
CL
140
141 entry = pte_to_swp_entry(pte);
142
e9995ef9
HD
143 if (!is_migration_entry(entry) ||
144 migration_entry_to_page(entry) != old)
145 goto unlock;
0697212a 146
0697212a
CL
147 get_page(new);
148 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
c3d16e16
CG
149 if (pte_swp_soft_dirty(*ptep))
150 pte = pte_mksoft_dirty(pte);
d3cb8bf6
MG
151
152 /* Recheck VMA as permissions can change since migration started */
0697212a 153 if (is_write_migration_entry(entry))
d3cb8bf6
MG
154 pte = maybe_mkwrite(pte, vma);
155
3ef8fd7f 156#ifdef CONFIG_HUGETLB_PAGE
be7517d6 157 if (PageHuge(new)) {
290408d4 158 pte = pte_mkhuge(pte);
be7517d6
TL
159 pte = arch_make_huge_pte(pte, vma, new, 0);
160 }
3ef8fd7f 161#endif
c2cc499c 162 flush_dcache_page(new);
0697212a 163 set_pte_at(mm, addr, ptep, pte);
04e62a29 164
290408d4
NH
165 if (PageHuge(new)) {
166 if (PageAnon(new))
167 hugepage_add_anon_rmap(new, vma, addr);
168 else
53f9263b 169 page_dup_rmap(new, true);
290408d4 170 } else if (PageAnon(new))
d281ee61 171 page_add_anon_rmap(new, vma, addr, false);
04e62a29
CL
172 else
173 page_add_file_rmap(new);
174
e388466d 175 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
51afb12b
HD
176 mlock_vma_page(new);
177
04e62a29 178 /* No need to invalidate - it was non-present before */
4b3073e1 179 update_mmu_cache(vma, addr, ptep);
e9995ef9 180unlock:
0697212a 181 pte_unmap_unlock(ptep, ptl);
e9995ef9
HD
182out:
183 return SWAP_AGAIN;
0697212a
CL
184}
185
04e62a29
CL
186/*
187 * Get rid of all migration entries and replace them by
188 * references to the indicated page.
189 */
e388466d 190void remove_migration_ptes(struct page *old, struct page *new, bool locked)
04e62a29 191{
051ac83a
JK
192 struct rmap_walk_control rwc = {
193 .rmap_one = remove_migration_pte,
194 .arg = old,
195 };
196
e388466d
KS
197 if (locked)
198 rmap_walk_locked(new, &rwc);
199 else
200 rmap_walk(new, &rwc);
04e62a29
CL
201}
202
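/*
 * Illustrative pairing, not part of migrate.c: try_to_unmap() with
 * TTU_MIGRATION installs migration entries, and remove_migration_ptes()
 * later rewrites them to point at whichever page survived, exactly as
 * __unmap_and_move() further down does:
 *
 *	try_to_unmap(page,
 *		TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 *	rc = move_to_new_page(newpage, page, mode);
 *	remove_migration_ptes(page,
 *		rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
 */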
0697212a
CL
203/*
204 * Something used the pte of a page under migration. We need to
205 * get to the page and wait until migration is finished.
206 * When we return from this function the fault will be retried.
0697212a 207 */
e66f17ff 208void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
30dad309 209 spinlock_t *ptl)
0697212a 210{
30dad309 211 pte_t pte;
0697212a
CL
212 swp_entry_t entry;
213 struct page *page;
214
30dad309 215 spin_lock(ptl);
0697212a
CL
216 pte = *ptep;
217 if (!is_swap_pte(pte))
218 goto out;
219
220 entry = pte_to_swp_entry(pte);
221 if (!is_migration_entry(entry))
222 goto out;
223
224 page = migration_entry_to_page(entry);
225
e286781d
NP
226 /*
227 * Once radix-tree replacement of page migration started, page_count
228 * *must* be zero. And, we don't want to call wait_on_page_locked()
229 * against a page without get_page().
230 * So, we use get_page_unless_zero() here. Even if it fails, the page
231 * fault will occur again.
232 */
233 if (!get_page_unless_zero(page))
234 goto out;
0697212a
CL
235 pte_unmap_unlock(ptep, ptl);
236 wait_on_page_locked(page);
237 put_page(page);
238 return;
239out:
240 pte_unmap_unlock(ptep, ptl);
241}
242
30dad309
NH
243void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
244 unsigned long address)
245{
246 spinlock_t *ptl = pte_lockptr(mm, pmd);
247 pte_t *ptep = pte_offset_map(pmd, address);
248 __migration_entry_wait(mm, ptep, ptl);
249}
250
cb900f41
KS
251void migration_entry_wait_huge(struct vm_area_struct *vma,
252 struct mm_struct *mm, pte_t *pte)
30dad309 253{
cb900f41 254 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
30dad309
NH
255 __migration_entry_wait(mm, pte, ptl);
256}
257
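/*
 * Illustrative caller, not part of migrate.c: the fault path in
 * mm/memory.c (do_swap_page()) parks here when it finds a migration
 * entry, roughly as follows:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry)) {
 *			migration_entry_wait(mm, pmd, address);
 *			goto out;	// the fault is simply retried
 *		}
 *		...
 *	}
 */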
b969c4ab
MG
258#ifdef CONFIG_BLOCK
259/* Returns true if all buffers are successfully locked */
a6bc32b8
MG
260static bool buffer_migrate_lock_buffers(struct buffer_head *head,
261 enum migrate_mode mode)
b969c4ab
MG
262{
263 struct buffer_head *bh = head;
264
265 /* Simple case, sync compaction */
a6bc32b8 266 if (mode != MIGRATE_ASYNC) {
b969c4ab
MG
267 do {
268 get_bh(bh);
269 lock_buffer(bh);
270 bh = bh->b_this_page;
271
272 } while (bh != head);
273
274 return true;
275 }
276
277 /* async case, we cannot block on lock_buffer so use trylock_buffer */
278 do {
279 get_bh(bh);
280 if (!trylock_buffer(bh)) {
281 /*
282 * We failed to lock the buffer and cannot stall in
283 * async migration. Release the taken locks
284 */
285 struct buffer_head *failed_bh = bh;
286 put_bh(failed_bh);
287 bh = head;
288 while (bh != failed_bh) {
289 unlock_buffer(bh);
290 put_bh(bh);
291 bh = bh->b_this_page;
292 }
293 return false;
294 }
295
296 bh = bh->b_this_page;
297 } while (bh != head);
298 return true;
299}
300#else
301static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
a6bc32b8 302 enum migrate_mode mode)
b969c4ab
MG
303{
304 return true;
305}
306#endif /* CONFIG_BLOCK */
307
b20a3503 308/*
c3fcf8a5 309 * Replace the page in the mapping.
5b5c7120
CL
310 *
311 * The number of remaining references must be:
312 * 1 for anonymous pages without a mapping
313 * 2 for pages with a mapping
266cf658 314 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
b20a3503 315 */
36bc08cc 316int migrate_page_move_mapping(struct address_space *mapping,
b969c4ab 317 struct page *newpage, struct page *page,
8e321fef
BL
318 struct buffer_head *head, enum migrate_mode mode,
319 int extra_count)
b20a3503 320{
42cb14b1
HD
321 struct zone *oldzone, *newzone;
322 int dirty;
8e321fef 323 int expected_count = 1 + extra_count;
7cf9c2c7 324 void **pslot;
b20a3503 325
6c5240ae 326 if (!mapping) {
0e8c7d0f 327 /* Anonymous page without mapping */
8e321fef 328 if (page_count(page) != expected_count)
6c5240ae 329 return -EAGAIN;
cf4b769a
HD
330
331 /* No turning back from here */
cf4b769a
HD
332 newpage->index = page->index;
333 newpage->mapping = page->mapping;
334 if (PageSwapBacked(page))
fa9949da 335 __SetPageSwapBacked(newpage);
cf4b769a 336
78bd5209 337 return MIGRATEPAGE_SUCCESS;
6c5240ae
CL
338 }
339
42cb14b1
HD
340 oldzone = page_zone(page);
341 newzone = page_zone(newpage);
342
19fd6231 343 spin_lock_irq(&mapping->tree_lock);
b20a3503 344
7cf9c2c7
NP
345 pslot = radix_tree_lookup_slot(&mapping->page_tree,
346 page_index(page));
b20a3503 347
8e321fef 348 expected_count += 1 + page_has_private(page);
e286781d 349 if (page_count(page) != expected_count ||
29c1f677 350 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
19fd6231 351 spin_unlock_irq(&mapping->tree_lock);
e23ca00b 352 return -EAGAIN;
b20a3503
CL
353 }
354
fe896d18 355 if (!page_ref_freeze(page, expected_count)) {
19fd6231 356 spin_unlock_irq(&mapping->tree_lock);
e286781d
NP
357 return -EAGAIN;
358 }
359
b969c4ab
MG
360 /*
361 * In the async migration case of moving a page with buffers, lock the
362 * buffers using trylock before the mapping is moved. Otherwise, if the
363 * mapping were moved first and we then failed to lock the buffers, we
364 * could not move the mapping back due to the elevated page count and
365 * would have to block waiting on other references to be dropped.
366 */
a6bc32b8
MG
367 if (mode == MIGRATE_ASYNC && head &&
368 !buffer_migrate_lock_buffers(head, mode)) {
fe896d18 369 page_ref_unfreeze(page, expected_count);
b969c4ab
MG
370 spin_unlock_irq(&mapping->tree_lock);
371 return -EAGAIN;
372 }
373
b20a3503 374 /*
cf4b769a
HD
375 * Now we know that no one else is looking at the page:
376 * no turning back from here.
b20a3503 377 */
cf4b769a
HD
378 newpage->index = page->index;
379 newpage->mapping = page->mapping;
380 if (PageSwapBacked(page))
fa9949da 381 __SetPageSwapBacked(newpage);
cf4b769a 382
7cf9c2c7 383 get_page(newpage); /* add cache reference */
b20a3503
CL
384 if (PageSwapCache(page)) {
385 SetPageSwapCache(newpage);
386 set_page_private(newpage, page_private(page));
387 }
388
42cb14b1
HD
389 /* Move dirty while page refs frozen and newpage not yet exposed */
390 dirty = PageDirty(page);
391 if (dirty) {
392 ClearPageDirty(page);
393 SetPageDirty(newpage);
394 }
395
7cf9c2c7
NP
396 radix_tree_replace_slot(pslot, newpage);
397
398 /*
937a94c9
JG
399 * Drop cache reference from old page by unfreezing
400 * to one less reference.
7cf9c2c7
NP
401 * We know this isn't the last reference.
402 */
fe896d18 403 page_ref_unfreeze(page, expected_count - 1);
7cf9c2c7 404
42cb14b1
HD
405 spin_unlock(&mapping->tree_lock);
406 /* Leave irq disabled to prevent preemption while updating stats */
407
0e8c7d0f
CL
408 /*
409 * If moved to a different zone then also account
410 * the page for that zone. Other VM counters will be
411 * taken care of when we establish references to the
412 * new page and drop references to the old page.
413 *
414 * Note that anonymous pages are accounted for
415 * via NR_FILE_PAGES and NR_ANON_PAGES if they
416 * are mapped to swap space.
417 */
42cb14b1
HD
418 if (newzone != oldzone) {
419 __dec_zone_state(oldzone, NR_FILE_PAGES);
420 __inc_zone_state(newzone, NR_FILE_PAGES);
421 if (PageSwapBacked(page) && !PageSwapCache(page)) {
422 __dec_zone_state(oldzone, NR_SHMEM);
423 __inc_zone_state(newzone, NR_SHMEM);
424 }
425 if (dirty && mapping_cap_account_dirty(mapping)) {
426 __dec_zone_state(oldzone, NR_FILE_DIRTY);
427 __inc_zone_state(newzone, NR_FILE_DIRTY);
428 }
4b02108a 429 }
42cb14b1 430 local_irq_enable();
b20a3503 431
78bd5209 432 return MIGRATEPAGE_SUCCESS;
b20a3503 433}
1118dce7 434EXPORT_SYMBOL(migrate_page_move_mapping);
b20a3503 435
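/*
 * Worked example (editorial, not part of the original source): for a mapped
 * page-cache page with buffer_heads and extra_count == 0,
 *
 *	expected_count = 1 + extra_count	// reference held by the caller
 *		       + 1			// page-cache (radix-tree) reference
 *		       + page_has_private(page)	// buffer_head reference
 *		       = 3
 *
 * which matches the "3 for pages with a mapping and PagePrivate" rule in the
 * comment above migrate_page_move_mapping().  Any extra pin (e.g. from
 * get_user_pages()) pushes page_count() past expected_count, the freeze
 * fails, and migration backs off with -EAGAIN.
 */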
290408d4
NH
436/*
437 * The expected number of remaining references is the same as that
438 * of migrate_page_move_mapping().
439 */
440int migrate_huge_page_move_mapping(struct address_space *mapping,
441 struct page *newpage, struct page *page)
442{
443 int expected_count;
444 void **pslot;
445
290408d4
NH
446 spin_lock_irq(&mapping->tree_lock);
447
448 pslot = radix_tree_lookup_slot(&mapping->page_tree,
449 page_index(page));
450
451 expected_count = 2 + page_has_private(page);
452 if (page_count(page) != expected_count ||
29c1f677 453 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
290408d4
NH
454 spin_unlock_irq(&mapping->tree_lock);
455 return -EAGAIN;
456 }
457
fe896d18 458 if (!page_ref_freeze(page, expected_count)) {
290408d4
NH
459 spin_unlock_irq(&mapping->tree_lock);
460 return -EAGAIN;
461 }
462
cf4b769a
HD
463 newpage->index = page->index;
464 newpage->mapping = page->mapping;
6a93ca8f 465
290408d4
NH
466 get_page(newpage);
467
468 radix_tree_replace_slot(pslot, newpage);
469
fe896d18 470 page_ref_unfreeze(page, expected_count - 1);
290408d4
NH
471
472 spin_unlock_irq(&mapping->tree_lock);
6a93ca8f 473
78bd5209 474 return MIGRATEPAGE_SUCCESS;
290408d4
NH
475}
476
30b0a105
DH
477/*
478 * Gigantic pages are so large that we do not guarantee that page++ pointer
479 * arithmetic will work across the entire page. We need something more
480 * specialized.
481 */
482static void __copy_gigantic_page(struct page *dst, struct page *src,
483 int nr_pages)
484{
485 int i;
486 struct page *dst_base = dst;
487 struct page *src_base = src;
488
489 for (i = 0; i < nr_pages; ) {
490 cond_resched();
491 copy_highpage(dst, src);
492
493 i++;
494 dst = mem_map_next(dst, dst_base, i);
495 src = mem_map_next(src, src_base, i);
496 }
497}
498
499static void copy_huge_page(struct page *dst, struct page *src)
500{
501 int i;
502 int nr_pages;
503
504 if (PageHuge(src)) {
505 /* hugetlbfs page */
506 struct hstate *h = page_hstate(src);
507 nr_pages = pages_per_huge_page(h);
508
509 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
510 __copy_gigantic_page(dst, src, nr_pages);
511 return;
512 }
513 } else {
514 /* thp page */
515 BUG_ON(!PageTransHuge(src));
516 nr_pages = hpage_nr_pages(src);
517 }
518
519 for (i = 0; i < nr_pages; i++) {
520 cond_resched();
521 copy_highpage(dst + i, src + i);
522 }
523}
524
b20a3503
CL
525/*
526 * Copy the page to its new location
527 */
290408d4 528void migrate_page_copy(struct page *newpage, struct page *page)
b20a3503 529{
7851a45c
RR
530 int cpupid;
531
b32967ff 532 if (PageHuge(page) || PageTransHuge(page))
290408d4
NH
533 copy_huge_page(newpage, page);
534 else
535 copy_highpage(newpage, page);
b20a3503
CL
536
537 if (PageError(page))
538 SetPageError(newpage);
539 if (PageReferenced(page))
540 SetPageReferenced(newpage);
541 if (PageUptodate(page))
542 SetPageUptodate(newpage);
894bc310 543 if (TestClearPageActive(page)) {
309381fe 544 VM_BUG_ON_PAGE(PageUnevictable(page), page);
b20a3503 545 SetPageActive(newpage);
418b27ef
LS
546 } else if (TestClearPageUnevictable(page))
547 SetPageUnevictable(newpage);
b20a3503
CL
548 if (PageChecked(page))
549 SetPageChecked(newpage);
550 if (PageMappedToDisk(page))
551 SetPageMappedToDisk(newpage);
552
42cb14b1
HD
553 /* Move dirty on pages not done by migrate_page_move_mapping() */
554 if (PageDirty(page))
555 SetPageDirty(newpage);
b20a3503 556
33c3fc71
VD
557 if (page_is_young(page))
558 set_page_young(newpage);
559 if (page_is_idle(page))
560 set_page_idle(newpage);
561
7851a45c
RR
562 /*
563 * Copy NUMA information to the new page, to prevent over-eager
564 * future migrations of this same page.
565 */
566 cpupid = page_cpupid_xchg_last(page, -1);
567 page_cpupid_xchg_last(newpage, cpupid);
568
e9995ef9 569 ksm_migrate_page(newpage, page);
c8d6553b
HD
570 /*
571 * Please do not reorder this without considering how mm/ksm.c's
572 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
573 */
b3b3a99c
NH
574 if (PageSwapCache(page))
575 ClearPageSwapCache(page);
b20a3503
CL
576 ClearPagePrivate(page);
577 set_page_private(page, 0);
b20a3503
CL
578
579 /*
580 * If any waiters have accumulated on the new page then
581 * wake them up.
582 */
583 if (PageWriteback(newpage))
584 end_page_writeback(newpage);
d435edca
VB
585
586 copy_page_owner(page, newpage);
74485cf2
JW
587
588 mem_cgroup_migrate(page, newpage);
b20a3503 589}
1118dce7 590EXPORT_SYMBOL(migrate_page_copy);
b20a3503 591
1d8b85cc
CL
592/************************************************************
593 * Migration functions
594 ***********************************************************/
595
b20a3503
CL
596/*
597 * Common logic to directly migrate a single page suitable for
266cf658 598 * pages that do not use PagePrivate/PagePrivate2.
b20a3503
CL
599 *
600 * Pages are locked upon entry and exit.
601 */
2d1db3b1 602int migrate_page(struct address_space *mapping,
a6bc32b8
MG
603 struct page *newpage, struct page *page,
604 enum migrate_mode mode)
b20a3503
CL
605{
606 int rc;
607
608 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
609
8e321fef 610 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
b20a3503 611
78bd5209 612 if (rc != MIGRATEPAGE_SUCCESS)
b20a3503
CL
613 return rc;
614
615 migrate_page_copy(newpage, page);
78bd5209 616 return MIGRATEPAGE_SUCCESS;
b20a3503
CL
617}
618EXPORT_SYMBOL(migrate_page);
619
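/*
 * Illustrative sketch, not part of migrate.c: a filesystem whose pages carry
 * no fs-private state can simply plug migrate_page() into its
 * address_space_operations ("demofs" and its callbacks are hypothetical).
 * Filesystems whose pages carry buffer_heads would use buffer_migrate_page()
 * below instead.
 */
int demofs_readpage(struct file *file, struct page *page);
int demofs_writepage(struct page *page, struct writeback_control *wbc);

static const struct address_space_operations demofs_aops = {
	.readpage	= demofs_readpage,
	.writepage	= demofs_writepage,
	.migratepage	= migrate_page,	/* no PagePrivate data to move */
};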
9361401e 620#ifdef CONFIG_BLOCK
1d8b85cc
CL
621/*
622 * Migration function for pages with buffers. This function can only be used
623 * if the underlying filesystem guarantees that no other references to "page"
624 * exist.
625 */
2d1db3b1 626int buffer_migrate_page(struct address_space *mapping,
a6bc32b8 627 struct page *newpage, struct page *page, enum migrate_mode mode)
1d8b85cc 628{
1d8b85cc
CL
629 struct buffer_head *bh, *head;
630 int rc;
631
1d8b85cc 632 if (!page_has_buffers(page))
a6bc32b8 633 return migrate_page(mapping, newpage, page, mode);
1d8b85cc
CL
634
635 head = page_buffers(page);
636
8e321fef 637 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
1d8b85cc 638
78bd5209 639 if (rc != MIGRATEPAGE_SUCCESS)
1d8b85cc
CL
640 return rc;
641
b969c4ab
MG
642 /*
643 * In the async case, migrate_page_move_mapping locked the buffers
644 * with an IRQ-safe spinlock held. In the sync case, the buffers
645 * need to be locked now
646 */
a6bc32b8
MG
647 if (mode != MIGRATE_ASYNC)
648 BUG_ON(!buffer_migrate_lock_buffers(head, mode));
1d8b85cc
CL
649
650 ClearPagePrivate(page);
651 set_page_private(newpage, page_private(page));
652 set_page_private(page, 0);
653 put_page(page);
654 get_page(newpage);
655
656 bh = head;
657 do {
658 set_bh_page(bh, newpage, bh_offset(bh));
659 bh = bh->b_this_page;
660
661 } while (bh != head);
662
663 SetPagePrivate(newpage);
664
665 migrate_page_copy(newpage, page);
666
667 bh = head;
668 do {
669 unlock_buffer(bh);
670 put_bh(bh);
671 bh = bh->b_this_page;
672
673 } while (bh != head);
674
78bd5209 675 return MIGRATEPAGE_SUCCESS;
1d8b85cc
CL
676}
677EXPORT_SYMBOL(buffer_migrate_page);
9361401e 678#endif
1d8b85cc 679
04e62a29
CL
680/*
681 * Writeback a page to clean the dirty state
682 */
683static int writeout(struct address_space *mapping, struct page *page)
8351a6e4 684{
04e62a29
CL
685 struct writeback_control wbc = {
686 .sync_mode = WB_SYNC_NONE,
687 .nr_to_write = 1,
688 .range_start = 0,
689 .range_end = LLONG_MAX,
04e62a29
CL
690 .for_reclaim = 1
691 };
692 int rc;
693
694 if (!mapping->a_ops->writepage)
695 /* No write method for the address space */
696 return -EINVAL;
697
698 if (!clear_page_dirty_for_io(page))
699 /* Someone else already triggered a write */
700 return -EAGAIN;
701
8351a6e4 702 /*
04e62a29
CL
703 * A dirty page may imply that the underlying filesystem has
704 * the page on some queue. So the page must be clean for
705 * migration. Writeout may mean we lose the lock and the
706 * page state is no longer what we checked for earlier.
707 * At this point we know that the migration attempt cannot
708 * be successful.
8351a6e4 709 */
e388466d 710 remove_migration_ptes(page, page, false);
8351a6e4 711
04e62a29 712 rc = mapping->a_ops->writepage(page, &wbc);
8351a6e4 713
04e62a29
CL
714 if (rc != AOP_WRITEPAGE_ACTIVATE)
715 /* unlocked. Relock */
716 lock_page(page);
717
bda8550d 718 return (rc < 0) ? -EIO : -EAGAIN;
04e62a29
CL
719}
720
721/*
722 * Default handling if a filesystem does not provide a migration function.
723 */
724static int fallback_migrate_page(struct address_space *mapping,
a6bc32b8 725 struct page *newpage, struct page *page, enum migrate_mode mode)
04e62a29 726{
b969c4ab 727 if (PageDirty(page)) {
a6bc32b8
MG
728 /* Only writeback pages in full synchronous migration */
729 if (mode != MIGRATE_SYNC)
b969c4ab 730 return -EBUSY;
04e62a29 731 return writeout(mapping, page);
b969c4ab 732 }
8351a6e4
CL
733
734 /*
735 * Buffers may be managed in a filesystem specific way.
736 * We must have no buffers or drop them.
737 */
266cf658 738 if (page_has_private(page) &&
8351a6e4
CL
739 !try_to_release_page(page, GFP_KERNEL))
740 return -EAGAIN;
741
a6bc32b8 742 return migrate_page(mapping, newpage, page, mode);
8351a6e4
CL
743}
744
e24f0b8f
CL
745/*
746 * Move a page to a newly allocated page
747 * The page is locked and all ptes have been successfully removed.
748 *
749 * The new page will have replaced the old page if this function
750 * is successful.
894bc310
LS
751 *
752 * Return value:
753 * < 0 - error code
78bd5209 754 * MIGRATEPAGE_SUCCESS - success
e24f0b8f 755 */
3fe2011f 756static int move_to_new_page(struct page *newpage, struct page *page,
5c3f9a67 757 enum migrate_mode mode)
e24f0b8f
CL
758{
759 struct address_space *mapping;
760 int rc;
761
7db7671f
HD
762 VM_BUG_ON_PAGE(!PageLocked(page), page);
763 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
e24f0b8f 764
e24f0b8f
CL
765 mapping = page_mapping(page);
766 if (!mapping)
a6bc32b8 767 rc = migrate_page(mapping, newpage, page, mode);
b969c4ab 768 else if (mapping->a_ops->migratepage)
e24f0b8f 769 /*
b969c4ab
MG
770 * Most pages have a mapping and most filesystems provide a
771 * migratepage callback. Anonymous pages are part of swap
772 * space which also has its own migratepage callback. This
773 * is the most common path for page migration.
e24f0b8f 774 */
5c3f9a67 775 rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
b969c4ab 776 else
a6bc32b8 777 rc = fallback_migrate_page(mapping, newpage, page, mode);
e24f0b8f 778
5c3f9a67
HD
779 /*
780 * When successful, old pagecache page->mapping must be cleared before
781 * page is freed; but stats require that PageAnon be left as PageAnon.
782 */
783 if (rc == MIGRATEPAGE_SUCCESS) {
5c3f9a67
HD
784 if (!PageAnon(page))
785 page->mapping = NULL;
3fe2011f 786 }
e24f0b8f
CL
787 return rc;
788}
789
0dabec93 790static int __unmap_and_move(struct page *page, struct page *newpage,
9c620e2b 791 int force, enum migrate_mode mode)
e24f0b8f 792{
0dabec93 793 int rc = -EAGAIN;
2ebba6b7 794 int page_was_mapped = 0;
3f6c8272 795 struct anon_vma *anon_vma = NULL;
95a402c3 796
529ae9aa 797 if (!trylock_page(page)) {
a6bc32b8 798 if (!force || mode == MIGRATE_ASYNC)
0dabec93 799 goto out;
3e7d3449
MG
800
801 /*
802 * It's not safe for direct compaction to call lock_page.
803 * For example, during page readahead pages are added locked
804 * to the LRU. Later, when the IO completes the pages are
805 * marked uptodate and unlocked. However, the queueing
806 * could be merging multiple pages for one bio (e.g.
807 * mpage_readpages). If an allocation happens for the
808 * second or third page, the process can end up locking
809 * the same page twice and deadlocking. Rather than
810 * trying to be clever about what pages can be locked,
811 * avoid the use of lock_page for direct compaction
812 * altogether.
813 */
814 if (current->flags & PF_MEMALLOC)
0dabec93 815 goto out;
3e7d3449 816
e24f0b8f
CL
817 lock_page(page);
818 }
819
820 if (PageWriteback(page)) {
11bc82d6 821 /*
fed5b64a 822 * Only in the case of a full synchronous migration is it
a6bc32b8
MG
823 * necessary to wait for PageWriteback. In the async case,
824 * the retry loop is too short and in the sync-light case,
825 * the overhead of stalling is too much.
11bc82d6 826 */
a6bc32b8 827 if (mode != MIGRATE_SYNC) {
11bc82d6 828 rc = -EBUSY;
0a31bc97 829 goto out_unlock;
11bc82d6
AA
830 }
831 if (!force)
0a31bc97 832 goto out_unlock;
e24f0b8f
CL
833 wait_on_page_writeback(page);
834 }
03f15c86 835
e24f0b8f 836 /*
dc386d4d
KH
837 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
838 * we cannot notice that anon_vma is freed while we migrate a page.
1ce82b69 839 * This get_anon_vma() delays freeing anon_vma pointer until the end
dc386d4d 840 * of migration. File cache pages are no problem because of page_lock()
989f89c5
KH
841 * File cache pages may use write_page() or lock_page() during migration,
842 * so we only need to care about anon pages here.
03f15c86
HD
843 *
844 * Only page_get_anon_vma() understands the subtleties of
845 * getting a hold on an anon_vma from outside one of its mms.
846 * But if we cannot get anon_vma, then we won't need it anyway,
847 * because that implies that the anon page is no longer mapped
848 * (and cannot be remapped so long as we hold the page lock).
dc386d4d 849 */
03f15c86 850 if (PageAnon(page) && !PageKsm(page))
746b18d4 851 anon_vma = page_get_anon_vma(page);
62e1c553 852
7db7671f
HD
853 /*
854 * Block others from accessing the new page when we get around to
855 * establishing additional references. We are usually the only one
856 * holding a reference to newpage at this point. We used to have a BUG
857 * here if trylock_page(newpage) fails, but would like to allow for
858 * cases where there might be a race with the previous use of newpage.
859 * This is much like races on refcount of oldpage: just don't BUG().
860 */
861 if (unlikely(!trylock_page(newpage)))
862 goto out_unlock;
863
d6d86c0a 864 if (unlikely(isolated_balloon_page(page))) {
bf6bddf1
RA
865 /*
866 * A ballooned page does not need any special attention from
867 * physical to virtual reverse mapping procedures.
868 * Skip any attempt to unmap PTEs or to remap swap cache,
869 * in order to avoid burning cycles at rmap level, and perform
870 * the page migration right away (protected by page lock).
871 */
872 rc = balloon_page_migrate(newpage, page, mode);
7db7671f 873 goto out_unlock_both;
bf6bddf1
RA
874 }
875
dc386d4d 876 /*
62e1c553
SL
877 * Corner case handling:
878 * 1. When a new swap-cache page is read into, it is added to the LRU
879 * and treated as swapcache but it has no rmap yet.
880 * Calling try_to_unmap() against a page->mapping==NULL page will
881 * trigger a BUG. So handle it here.
882 * 2. An orphaned page (see truncate_complete_page) might have
883 * fs-private metadata. The page can be picked up due to memory
884 * offlining. Everywhere else except page reclaim, the page is
885 * invisible to the vm, so the page can not be migrated. So try to
886 * free the metadata, so the page can be freed.
e24f0b8f 887 */
62e1c553 888 if (!page->mapping) {
309381fe 889 VM_BUG_ON_PAGE(PageAnon(page), page);
1ce82b69 890 if (page_has_private(page)) {
62e1c553 891 try_to_free_buffers(page);
7db7671f 892 goto out_unlock_both;
62e1c553 893 }
7db7671f
HD
894 } else if (page_mapped(page)) {
895 /* Establish migration ptes */
03f15c86
HD
896 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
897 page);
2ebba6b7 898 try_to_unmap(page,
da1b13cc 899 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
2ebba6b7
HD
900 page_was_mapped = 1;
901 }
dc386d4d 902
e6a1530d 903 if (!page_mapped(page))
5c3f9a67 904 rc = move_to_new_page(newpage, page, mode);
e24f0b8f 905
5c3f9a67
HD
906 if (page_was_mapped)
907 remove_migration_ptes(page,
e388466d 908 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
3f6c8272 909
7db7671f
HD
910out_unlock_both:
911 unlock_page(newpage);
912out_unlock:
3f6c8272 913 /* Drop an anon_vma reference if we took one */
76545066 914 if (anon_vma)
9e60109f 915 put_anon_vma(anon_vma);
e24f0b8f 916 unlock_page(page);
0dabec93 917out:
c6c919eb
MK
918 /*
919 * If migration is successful, decrease the refcount of the newpage,
920 * which will not free the page because the new page owner increased
921 * the refcount. Also, if it is an LRU page, add the page to the LRU
922 * list here.
923 */
924 if (rc == MIGRATEPAGE_SUCCESS) {
925 if (unlikely(__is_movable_balloon_page(newpage)))
926 put_page(newpage);
927 else
928 putback_lru_page(newpage);
929 }
930
0dabec93
MK
931 return rc;
932}
95a402c3 933
ef2a5153
GU
934/*
935 * gcc 4.7 and 4.8 on arm get ICEs when inlining unmap_and_move(). Work
936 * around it.
937 */
938#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
939#define ICE_noinline noinline
940#else
941#define ICE_noinline
942#endif
943
0dabec93
MK
944/*
945 * Obtain the lock on page, remove all ptes and migrate the page
946 * to the newly allocated page in newpage.
947 */
ef2a5153
GU
948static ICE_noinline int unmap_and_move(new_page_t get_new_page,
949 free_page_t put_new_page,
950 unsigned long private, struct page *page,
add05cec
NH
951 int force, enum migrate_mode mode,
952 enum migrate_reason reason)
0dabec93 953{
2def7424 954 int rc = MIGRATEPAGE_SUCCESS;
0dabec93 955 int *result = NULL;
2def7424 956 struct page *newpage;
0dabec93 957
2def7424 958 newpage = get_new_page(page, private, &result);
0dabec93
MK
959 if (!newpage)
960 return -ENOMEM;
961
962 if (page_count(page) == 1) {
963 /* page was freed from under us. So we are done. */
c6c919eb
MK
964 ClearPageActive(page);
965 ClearPageUnevictable(page);
966 if (put_new_page)
967 put_new_page(newpage, private);
968 else
969 put_page(newpage);
0dabec93
MK
970 goto out;
971 }
972
4d2fa965
KS
973 if (unlikely(PageTransHuge(page))) {
974 lock_page(page);
975 rc = split_huge_page(page);
976 unlock_page(page);
977 if (rc)
0dabec93 978 goto out;
4d2fa965 979 }
0dabec93 980
9c620e2b 981 rc = __unmap_and_move(page, newpage, force, mode);
c6c919eb 982 if (rc == MIGRATEPAGE_SUCCESS)
7cd12b4a 983 set_page_owner_migrate_reason(newpage, reason);
bf6bddf1 984
0dabec93 985out:
e24f0b8f 986 if (rc != -EAGAIN) {
0dabec93
MK
987 /*
988 * A page that has been migrated has all references
989 * removed and will be freed. A page that has not been
990 * migrated will have kept its references and be
991 * restored.
992 */
993 list_del(&page->lru);
a731286d 994 dec_zone_page_state(page, NR_ISOLATED_ANON +
6c0b1351 995 page_is_file_cache(page));
c6c919eb
MK
996 }
997
998 /*
999 * If migration is successful, release the reference grabbed during
1000 * isolation. Otherwise, restore the page to the right list unless
1001 * we want to retry.
1002 */
1003 if (rc == MIGRATEPAGE_SUCCESS) {
1004 put_page(page);
1005 if (reason == MR_MEMORY_FAILURE) {
d7e69488 1006 /*
c6c919eb
MK
1007 * Set PG_HWPoison on just freed page
1008 * intentionally. Although it's rather weird,
1009 * it's how HWPoison flag works at the moment.
d7e69488 1010 */
da1b13cc
WL
1011 if (!test_set_page_hwpoison(page))
1012 num_poisoned_pages_inc();
c6c919eb
MK
1013 }
1014 } else {
1015 if (rc != -EAGAIN)
add05cec 1016 putback_lru_page(page);
c6c919eb
MK
1017 if (put_new_page)
1018 put_new_page(newpage, private);
1019 else
1020 put_page(newpage);
e24f0b8f 1021 }
68711a74 1022
742755a1
CL
1023 if (result) {
1024 if (rc)
1025 *result = rc;
1026 else
1027 *result = page_to_nid(newpage);
1028 }
e24f0b8f
CL
1029 return rc;
1030}
1031
290408d4
NH
1032/*
1033 * Counterpart of unmap_and_move_page() for hugepage migration.
1034 *
1035 * This function doesn't wait for the completion of hugepage I/O
1036 * because there is no race between I/O and migration for hugepages.
1037 * Note that currently hugepage I/O occurs only in direct I/O
1038 * where no lock is held and PG_writeback is irrelevant,
1039 * and the writeback status of all subpages is counted in the reference
1040 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1041 * under direct I/O, the reference of the head page is 512 and a bit more.)
1042 * This means that when we try to migrate hugepage whose subpages are
1043 * doing direct I/O, some references remain after try_to_unmap() and
1044 * hugepage migration fails without data corruption.
1045 *
1046 * There is also no race when direct I/O is issued on the page under migration,
1047 * because then pte is replaced with migration swap entry and direct I/O code
1048 * will wait in the page fault for migration to complete.
1049 */
1050static int unmap_and_move_huge_page(new_page_t get_new_page,
68711a74
DR
1051 free_page_t put_new_page, unsigned long private,
1052 struct page *hpage, int force,
7cd12b4a 1053 enum migrate_mode mode, int reason)
290408d4 1054{
2def7424 1055 int rc = -EAGAIN;
290408d4 1056 int *result = NULL;
2ebba6b7 1057 int page_was_mapped = 0;
32665f2b 1058 struct page *new_hpage;
290408d4
NH
1059 struct anon_vma *anon_vma = NULL;
1060
83467efb
NH
1061 /*
1062 * Movability of hugepages depends on architectures and hugepage size.
1063 * This check is necessary because some callers of hugepage migration
1064 * like soft offline and memory hotremove don't walk through page
1065 * tables or check whether the hugepage is pmd-based or not before
1066 * kicking migration.
1067 */
100873d7 1068 if (!hugepage_migration_supported(page_hstate(hpage))) {
32665f2b 1069 putback_active_hugepage(hpage);
83467efb 1070 return -ENOSYS;
32665f2b 1071 }
83467efb 1072
32665f2b 1073 new_hpage = get_new_page(hpage, private, &result);
290408d4
NH
1074 if (!new_hpage)
1075 return -ENOMEM;
1076
290408d4 1077 if (!trylock_page(hpage)) {
a6bc32b8 1078 if (!force || mode != MIGRATE_SYNC)
290408d4
NH
1079 goto out;
1080 lock_page(hpage);
1081 }
1082
746b18d4
PZ
1083 if (PageAnon(hpage))
1084 anon_vma = page_get_anon_vma(hpage);
290408d4 1085
7db7671f
HD
1086 if (unlikely(!trylock_page(new_hpage)))
1087 goto put_anon;
1088
2ebba6b7
HD
1089 if (page_mapped(hpage)) {
1090 try_to_unmap(hpage,
1091 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1092 page_was_mapped = 1;
1093 }
290408d4
NH
1094
1095 if (!page_mapped(hpage))
5c3f9a67 1096 rc = move_to_new_page(new_hpage, hpage, mode);
290408d4 1097
5c3f9a67
HD
1098 if (page_was_mapped)
1099 remove_migration_ptes(hpage,
e388466d 1100 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
290408d4 1101
7db7671f
HD
1102 unlock_page(new_hpage);
1103
1104put_anon:
fd4a4663 1105 if (anon_vma)
9e60109f 1106 put_anon_vma(anon_vma);
8e6ac7fa 1107
2def7424 1108 if (rc == MIGRATEPAGE_SUCCESS) {
8e6ac7fa 1109 hugetlb_cgroup_migrate(hpage, new_hpage);
2def7424 1110 put_new_page = NULL;
7cd12b4a 1111 set_page_owner_migrate_reason(new_hpage, reason);
2def7424 1112 }
8e6ac7fa 1113
290408d4 1114 unlock_page(hpage);
09761333 1115out:
b8ec1cee
NH
1116 if (rc != -EAGAIN)
1117 putback_active_hugepage(hpage);
68711a74
DR
1118
1119 /*
1120 * If migration was not successful and there's a freeing callback, use
1121 * it. Otherwise, put_page() will drop the reference grabbed during
1122 * isolation.
1123 */
2def7424 1124 if (put_new_page)
68711a74
DR
1125 put_new_page(new_hpage, private);
1126 else
3aaa76e1 1127 putback_active_hugepage(new_hpage);
68711a74 1128
290408d4
NH
1129 if (result) {
1130 if (rc)
1131 *result = rc;
1132 else
1133 *result = page_to_nid(new_hpage);
1134 }
1135 return rc;
1136}
1137
b20a3503 1138/*
c73e5c9c
SB
1139 * migrate_pages - migrate the pages specified in a list, to the free pages
1140 * supplied as the target for the page migration
b20a3503 1141 *
c73e5c9c
SB
1142 * @from: The list of pages to be migrated.
1143 * @get_new_page: The function used to allocate free pages to be used
1144 * as the target of the page migration.
68711a74
DR
1145 * @put_new_page: The function used to free target pages if migration
1146 * fails, or NULL if no special handling is necessary.
c73e5c9c
SB
1147 * @private: Private data to be passed on to get_new_page()
1148 * @mode: The migration mode that specifies the constraints for
1149 * page migration, if any.
1150 * @reason: The reason for page migration.
b20a3503 1151 *
c73e5c9c
SB
1152 * The function returns after 10 attempts or if no pages are movable any more
1153 * because the list has become empty or no retryable pages exist any more.
14e0f9bc 1154 * The caller should call putback_movable_pages() to return pages to the LRU
28bd6578 1155 * or free list only if ret != 0.
b20a3503 1156 *
c73e5c9c 1157 * Returns the number of pages that were not migrated, or an error code.
b20a3503 1158 */
9c620e2b 1159int migrate_pages(struct list_head *from, new_page_t get_new_page,
68711a74
DR
1160 free_page_t put_new_page, unsigned long private,
1161 enum migrate_mode mode, int reason)
b20a3503 1162{
e24f0b8f 1163 int retry = 1;
b20a3503 1164 int nr_failed = 0;
5647bc29 1165 int nr_succeeded = 0;
b20a3503
CL
1166 int pass = 0;
1167 struct page *page;
1168 struct page *page2;
1169 int swapwrite = current->flags & PF_SWAPWRITE;
1170 int rc;
1171
1172 if (!swapwrite)
1173 current->flags |= PF_SWAPWRITE;
1174
e24f0b8f
CL
1175 for(pass = 0; pass < 10 && retry; pass++) {
1176 retry = 0;
b20a3503 1177
e24f0b8f 1178 list_for_each_entry_safe(page, page2, from, lru) {
e24f0b8f 1179 cond_resched();
2d1db3b1 1180
31caf665
NH
1181 if (PageHuge(page))
1182 rc = unmap_and_move_huge_page(get_new_page,
68711a74 1183 put_new_page, private, page,
7cd12b4a 1184 pass > 2, mode, reason);
31caf665 1185 else
68711a74 1186 rc = unmap_and_move(get_new_page, put_new_page,
add05cec
NH
1187 private, page, pass > 2, mode,
1188 reason);
2d1db3b1 1189
e24f0b8f 1190 switch(rc) {
95a402c3 1191 case -ENOMEM:
dfef2ef4 1192 nr_failed++;
95a402c3 1193 goto out;
e24f0b8f 1194 case -EAGAIN:
2d1db3b1 1195 retry++;
e24f0b8f 1196 break;
78bd5209 1197 case MIGRATEPAGE_SUCCESS:
5647bc29 1198 nr_succeeded++;
e24f0b8f
CL
1199 break;
1200 default:
354a3363
NH
1201 /*
1202 * Permanent failure (-EBUSY, -ENOSYS, etc.):
1203 * unlike -EAGAIN case, the failed page is
1204 * removed from migration page list and not
1205 * retried in the next outer loop.
1206 */
2d1db3b1 1207 nr_failed++;
e24f0b8f 1208 break;
2d1db3b1 1209 }
b20a3503
CL
1210 }
1211 }
f2f81fb2
VB
1212 nr_failed += retry;
1213 rc = nr_failed;
95a402c3 1214out:
5647bc29
MG
1215 if (nr_succeeded)
1216 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1217 if (nr_failed)
1218 count_vm_events(PGMIGRATE_FAIL, nr_failed);
7b2a2d4a
MG
1219 trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1220
b20a3503
CL
1221 if (!swapwrite)
1222 current->flags &= ~PF_SWAPWRITE;
1223
78bd5209 1224 return rc;
b20a3503 1225}
95a402c3 1226
742755a1
CL
1227#ifdef CONFIG_NUMA
1228/*
1229 * Move a list of individual pages
1230 */
1231struct page_to_node {
1232 unsigned long addr;
1233 struct page *page;
1234 int node;
1235 int status;
1236};
1237
1238static struct page *new_page_node(struct page *p, unsigned long private,
1239 int **result)
1240{
1241 struct page_to_node *pm = (struct page_to_node *)private;
1242
1243 while (pm->node != MAX_NUMNODES && pm->page != p)
1244 pm++;
1245
1246 if (pm->node == MAX_NUMNODES)
1247 return NULL;
1248
1249 *result = &pm->status;
1250
e632a938
NH
1251 if (PageHuge(p))
1252 return alloc_huge_page_node(page_hstate(compound_head(p)),
1253 pm->node);
1254 else
96db800f 1255 return __alloc_pages_node(pm->node,
e97ca8e5 1256 GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
742755a1
CL
1257}
1258
1259/*
1260 * Move a set of pages as indicated in the pm array. The addr
1261 * field must be set to the virtual address of the page to be moved
1262 * and the node number must contain a valid target node.
5e9a0f02 1263 * The pm array ends with node = MAX_NUMNODES.
742755a1 1264 */
5e9a0f02
BG
1265static int do_move_page_to_node_array(struct mm_struct *mm,
1266 struct page_to_node *pm,
1267 int migrate_all)
742755a1
CL
1268{
1269 int err;
1270 struct page_to_node *pp;
1271 LIST_HEAD(pagelist);
1272
1273 down_read(&mm->mmap_sem);
1274
1275 /*
1276 * Build a list of pages to migrate
1277 */
742755a1
CL
1278 for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1279 struct vm_area_struct *vma;
1280 struct page *page;
1281
742755a1
CL
1282 err = -EFAULT;
1283 vma = find_vma(mm, pp->addr);
70384dc6 1284 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
742755a1
CL
1285 goto set_status;
1286
d899844e
KS
1287 /* FOLL_DUMP to ignore special (like zero) pages */
1288 page = follow_page(vma, pp->addr,
1289 FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
89f5b7da
LT
1290
1291 err = PTR_ERR(page);
1292 if (IS_ERR(page))
1293 goto set_status;
1294
742755a1
CL
1295 err = -ENOENT;
1296 if (!page)
1297 goto set_status;
1298
742755a1
CL
1299 pp->page = page;
1300 err = page_to_nid(page);
1301
1302 if (err == pp->node)
1303 /*
1304 * Node already in the right place
1305 */
1306 goto put_and_set;
1307
1308 err = -EACCES;
1309 if (page_mapcount(page) > 1 &&
1310 !migrate_all)
1311 goto put_and_set;
1312
e632a938 1313 if (PageHuge(page)) {
e66f17ff
NH
1314 if (PageHead(page))
1315 isolate_huge_page(page, &pagelist);
e632a938
NH
1316 goto put_and_set;
1317 }
1318
62695a84 1319 err = isolate_lru_page(page);
6d9c285a 1320 if (!err) {
62695a84 1321 list_add_tail(&page->lru, &pagelist);
6d9c285a
KM
1322 inc_zone_page_state(page, NR_ISOLATED_ANON +
1323 page_is_file_cache(page));
1324 }
742755a1
CL
1325put_and_set:
1326 /*
1327 * Either remove the duplicate refcount from
1328 * isolate_lru_page() or drop the page ref if it was
1329 * not isolated.
1330 */
1331 put_page(page);
1332set_status:
1333 pp->status = err;
1334 }
1335
e78bbfa8 1336 err = 0;
cf608ac1 1337 if (!list_empty(&pagelist)) {
68711a74 1338 err = migrate_pages(&pagelist, new_page_node, NULL,
9c620e2b 1339 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
cf608ac1 1340 if (err)
e632a938 1341 putback_movable_pages(&pagelist);
cf608ac1 1342 }
742755a1
CL
1343
1344 up_read(&mm->mmap_sem);
1345 return err;
1346}
1347
5e9a0f02
BG
1348/*
1349 * Migrate an array of page addresses onto an array of nodes and fill
1350 * the corresponding array of status.
1351 */
3268c63e 1352static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
5e9a0f02
BG
1353 unsigned long nr_pages,
1354 const void __user * __user *pages,
1355 const int __user *nodes,
1356 int __user *status, int flags)
1357{
3140a227 1358 struct page_to_node *pm;
3140a227
BG
1359 unsigned long chunk_nr_pages;
1360 unsigned long chunk_start;
1361 int err;
5e9a0f02 1362
3140a227
BG
1363 err = -ENOMEM;
1364 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1365 if (!pm)
5e9a0f02 1366 goto out;
35282a2d
BG
1367
1368 migrate_prep();
1369
5e9a0f02 1370 /*
3140a227
BG
1371 * Store a chunk of page_to_node array in a page,
1372 * but keep the last one as a marker
5e9a0f02 1373 */
3140a227 1374 chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
5e9a0f02 1375
3140a227
BG
1376 for (chunk_start = 0;
1377 chunk_start < nr_pages;
1378 chunk_start += chunk_nr_pages) {
1379 int j;
5e9a0f02 1380
3140a227
BG
1381 if (chunk_start + chunk_nr_pages > nr_pages)
1382 chunk_nr_pages = nr_pages - chunk_start;
1383
1384 /* fill the chunk pm with addrs and nodes from user-space */
1385 for (j = 0; j < chunk_nr_pages; j++) {
1386 const void __user *p;
5e9a0f02
BG
1387 int node;
1388
3140a227
BG
1389 err = -EFAULT;
1390 if (get_user(p, pages + j + chunk_start))
1391 goto out_pm;
1392 pm[j].addr = (unsigned long) p;
1393
1394 if (get_user(node, nodes + j + chunk_start))
5e9a0f02
BG
1395 goto out_pm;
1396
1397 err = -ENODEV;
6f5a55f1
LT
1398 if (node < 0 || node >= MAX_NUMNODES)
1399 goto out_pm;
1400
389162c2 1401 if (!node_state(node, N_MEMORY))
5e9a0f02
BG
1402 goto out_pm;
1403
1404 err = -EACCES;
1405 if (!node_isset(node, task_nodes))
1406 goto out_pm;
1407
3140a227
BG
1408 pm[j].node = node;
1409 }
1410
1411 /* End marker for this chunk */
1412 pm[chunk_nr_pages].node = MAX_NUMNODES;
1413
1414 /* Migrate this chunk */
1415 err = do_move_page_to_node_array(mm, pm,
1416 flags & MPOL_MF_MOVE_ALL);
1417 if (err < 0)
1418 goto out_pm;
5e9a0f02 1419
5e9a0f02 1420 /* Return status information */
3140a227
BG
1421 for (j = 0; j < chunk_nr_pages; j++)
1422 if (put_user(pm[j].status, status + j + chunk_start)) {
5e9a0f02 1423 err = -EFAULT;
3140a227
BG
1424 goto out_pm;
1425 }
1426 }
1427 err = 0;
5e9a0f02
BG
1428
1429out_pm:
3140a227 1430 free_page((unsigned long)pm);
5e9a0f02
BG
1431out:
1432 return err;
1433}
1434
742755a1 1435/*
2f007e74 1436 * Determine the nodes of an array of pages and store them in an array of status.
742755a1 1437 */
80bba129
BG
1438static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1439 const void __user **pages, int *status)
742755a1 1440{
2f007e74 1441 unsigned long i;
2f007e74 1442
742755a1
CL
1443 down_read(&mm->mmap_sem);
1444
2f007e74 1445 for (i = 0; i < nr_pages; i++) {
80bba129 1446 unsigned long addr = (unsigned long)(*pages);
742755a1
CL
1447 struct vm_area_struct *vma;
1448 struct page *page;
c095adbc 1449 int err = -EFAULT;
2f007e74
BG
1450
1451 vma = find_vma(mm, addr);
70384dc6 1452 if (!vma || addr < vma->vm_start)
742755a1
CL
1453 goto set_status;
1454
d899844e
KS
1455 /* FOLL_DUMP to ignore special (like zero) pages */
1456 page = follow_page(vma, addr, FOLL_DUMP);
89f5b7da
LT
1457
1458 err = PTR_ERR(page);
1459 if (IS_ERR(page))
1460 goto set_status;
1461
d899844e 1462 err = page ? page_to_nid(page) : -ENOENT;
742755a1 1463set_status:
80bba129
BG
1464 *status = err;
1465
1466 pages++;
1467 status++;
1468 }
1469
1470 up_read(&mm->mmap_sem);
1471}
1472
1473/*
1474 * Determine the nodes of a user array of pages and store it in
1475 * a user array of status.
1476 */
1477static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1478 const void __user * __user *pages,
1479 int __user *status)
1480{
1481#define DO_PAGES_STAT_CHUNK_NR 16
1482 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1483 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
80bba129 1484
87b8d1ad
PA
1485 while (nr_pages) {
1486 unsigned long chunk_nr;
80bba129 1487
87b8d1ad
PA
1488 chunk_nr = nr_pages;
1489 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1490 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1491
1492 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1493 break;
80bba129
BG
1494
1495 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1496
87b8d1ad
PA
1497 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1498 break;
742755a1 1499
87b8d1ad
PA
1500 pages += chunk_nr;
1501 status += chunk_nr;
1502 nr_pages -= chunk_nr;
1503 }
1504 return nr_pages ? -EFAULT : 0;
742755a1
CL
1505}
1506
1507/*
1508 * Move a list of pages in the address space of the currently executing
1509 * process.
1510 */
938bb9f5
HC
1511SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1512 const void __user * __user *, pages,
1513 const int __user *, nodes,
1514 int __user *, status, int, flags)
742755a1 1515{
c69e8d9c 1516 const struct cred *cred = current_cred(), *tcred;
742755a1 1517 struct task_struct *task;
742755a1 1518 struct mm_struct *mm;
5e9a0f02 1519 int err;
3268c63e 1520 nodemask_t task_nodes;
742755a1
CL
1521
1522 /* Check flags */
1523 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1524 return -EINVAL;
1525
1526 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1527 return -EPERM;
1528
1529 /* Find the mm_struct */
a879bf58 1530 rcu_read_lock();
228ebcbe 1531 task = pid ? find_task_by_vpid(pid) : current;
742755a1 1532 if (!task) {
a879bf58 1533 rcu_read_unlock();
742755a1
CL
1534 return -ESRCH;
1535 }
3268c63e 1536 get_task_struct(task);
742755a1
CL
1537
1538 /*
1539 * Check if this process has the right to modify the specified
1540 * process. The right exists if the process has administrative
1541 * capabilities, superuser privileges or the same
1542 * userid as the target process.
1543 */
c69e8d9c 1544 tcred = __task_cred(task);
b38a86eb
EB
1545 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1546 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
742755a1 1547 !capable(CAP_SYS_NICE)) {
c69e8d9c 1548 rcu_read_unlock();
742755a1 1549 err = -EPERM;
5e9a0f02 1550 goto out;
742755a1 1551 }
c69e8d9c 1552 rcu_read_unlock();
742755a1 1553
86c3a764
DQ
1554 err = security_task_movememory(task);
1555 if (err)
5e9a0f02 1556 goto out;
86c3a764 1557
3268c63e
CL
1558 task_nodes = cpuset_mems_allowed(task);
1559 mm = get_task_mm(task);
1560 put_task_struct(task);
1561
6e8b09ea
SL
1562 if (!mm)
1563 return -EINVAL;
1564
1565 if (nodes)
1566 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1567 nodes, status, flags);
1568 else
1569 err = do_pages_stat(mm, nr_pages, pages, status);
742755a1 1570
742755a1
CL
1571 mmput(mm);
1572 return err;
3268c63e
CL
1573
1574out:
1575 put_task_struct(task);
1576 return err;
742755a1 1577}
742755a1 1578
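/*
 * Illustrative user-space counterpart, not part of migrate.c: driving the
 * move_pages(2) syscall defined above through libnuma (link with -lnuma).
 * demo_move_to_node() is a hypothetical helper.
 */
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>

static int demo_move_to_node(void *addr, int target_node)
{
	void *pages[1] = { addr };
	int nodes[1] = { target_node };
	int status[1];

	/* pid 0 means the calling process; on success status[0] holds the
	 * node the page ended up on, on failure a negative errno value. */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0) {
		perror("move_pages");
		return -1;
	}
	return status[0];
}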
7039e1db
PZ
1579#ifdef CONFIG_NUMA_BALANCING
1580/*
1581 * Returns true if this is a safe migration target node for misplaced NUMA
1582 * pages. Currently it only checks the watermarks, which is crude.
1583 */
1584static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
3abef4e6 1585 unsigned long nr_migrate_pages)
7039e1db
PZ
1586{
1587 int z;
1588 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1589 struct zone *zone = pgdat->node_zones + z;
1590
1591 if (!populated_zone(zone))
1592 continue;
1593
6e543d57 1594 if (!zone_reclaimable(zone))
7039e1db
PZ
1595 continue;
1596
1597 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
1598 if (!zone_watermark_ok(zone, 0,
1599 high_wmark_pages(zone) +
1600 nr_migrate_pages,
1601 0, 0))
1602 continue;
1603 return true;
1604 }
1605 return false;
1606}
1607
1608static struct page *alloc_misplaced_dst_page(struct page *page,
1609 unsigned long data,
1610 int **result)
1611{
1612 int nid = (int) data;
1613 struct page *newpage;
1614
96db800f 1615 newpage = __alloc_pages_node(nid,
e97ca8e5
JW
1616 (GFP_HIGHUSER_MOVABLE |
1617 __GFP_THISNODE | __GFP_NOMEMALLOC |
1618 __GFP_NORETRY | __GFP_NOWARN) &
8479eba7 1619 ~__GFP_RECLAIM, 0);
bac0382c 1620
7039e1db
PZ
1621 return newpage;
1622}
1623
a8f60772
MG
1624/*
1625 * page migration rate limiting control.
1626 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
1627 * window of time. Default here says do not migrate more than 1280M per second.
1628 */
1629static unsigned int migrate_interval_millisecs __read_mostly = 100;
1630static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
1631
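/*
 * Worked example (editorial): with 4K pages PAGE_SHIFT is 12, so
 * ratelimit_pages = 128 << (20 - 12) = 32768 pages = 128MB per 100ms
 * window, i.e. the "1280M per second" ceiling quoted above.
 */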
b32967ff 1632/* Returns true if the node is migrate rate-limited after the update */
1c30e017
MG
1633static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1634 unsigned long nr_pages)
7039e1db 1635{
a8f60772
MG
1636 /*
1637 * Rate-limit the amount of data that is being migrated to a node.
1638 * Optimal placement is no good if the memory bus is saturated and
1639 * all the time is being spent migrating!
1640 */
a8f60772 1641 if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
1c5e9c27 1642 spin_lock(&pgdat->numabalancing_migrate_lock);
a8f60772
MG
1643 pgdat->numabalancing_migrate_nr_pages = 0;
1644 pgdat->numabalancing_migrate_next_window = jiffies +
1645 msecs_to_jiffies(migrate_interval_millisecs);
1c5e9c27 1646 spin_unlock(&pgdat->numabalancing_migrate_lock);
a8f60772 1647 }
af1839d7
MG
1648 if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1649 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1650 nr_pages);
1c5e9c27 1651 return true;
af1839d7 1652 }
1c5e9c27
MG
1653
1654 /*
1655 * This is an unlocked non-atomic update so errors are possible.
1656 * The consequence is failing to migrate when we potentially should
1657 * have, which is not severe enough to warrant locking. If it is ever
1658 * a problem, it can be converted to a per-cpu counter.
1659 */
1660 pgdat->numabalancing_migrate_nr_pages += nr_pages;
1661 return false;
b32967ff
MG
1662}
1663
1c30e017 1664static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
b32967ff 1665{
340ef390 1666 int page_lru;
a8f60772 1667
309381fe 1668 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
3abef4e6 1669
7039e1db 1670 /* Avoid migrating to a node that is nearly full */
340ef390
HD
1671 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1672 return 0;
7039e1db 1673
340ef390
HD
1674 if (isolate_lru_page(page))
1675 return 0;
7039e1db 1676
340ef390
HD
1677 /*
1678 * migrate_misplaced_transhuge_page() skips page migration's usual
1679 * check on page_count(), so we must do it here, now that the page
1680 * has been isolated: a GUP pin, or any other pin, prevents migration.
1681 * The expected page count is 3: 1 for page's mapcount and 1 for the
1682 * caller's pin and 1 for the reference taken by isolate_lru_page().
1683 */
1684 if (PageTransHuge(page) && page_count(page) != 3) {
1685 putback_lru_page(page);
1686 return 0;
7039e1db
PZ
1687 }
1688
340ef390
HD
1689 page_lru = page_is_file_cache(page);
1690 mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
1691 hpage_nr_pages(page));
1692
149c33e1 1693 /*
340ef390
HD
1694 * Isolating the page has taken another reference, so the
1695 * caller's reference can be safely dropped without the page
1696 * disappearing underneath us during migration.
149c33e1
MG
1697 */
1698 put_page(page);
340ef390 1699 return 1;
b32967ff
MG
1700}
1701
de466bd6
MG
1702bool pmd_trans_migrating(pmd_t pmd)
1703{
1704 struct page *page = pmd_page(pmd);
1705 return PageLocked(page);
1706}
1707
b32967ff
MG
1708/*
1709 * Attempt to migrate a misplaced page to the specified destination
1710 * node. Caller is expected to have an elevated reference count on
1711 * the page that will be dropped by this function before returning.
1712 */
1bc115d8
MG
1713int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1714 int node)
b32967ff
MG
1715{
1716 pg_data_t *pgdat = NODE_DATA(node);
340ef390 1717 int isolated;
b32967ff
MG
1718 int nr_remaining;
1719 LIST_HEAD(migratepages);
1720
1721 /*
1bc115d8
MG
1722 * Don't migrate file pages that are mapped in multiple processes
1723 * with execute permissions as they are probably shared libraries.
b32967ff 1724 */
1bc115d8
MG
1725 if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1726 (vma->vm_flags & VM_EXEC))
b32967ff 1727 goto out;
b32967ff
MG
1728
1729 /*
1730 * Rate-limit the amount of data that is being migrated to a node.
1731 * Optimal placement is no good if the memory bus is saturated and
1732 * all the time is being spent migrating!
1733 */
340ef390 1734 if (numamigrate_update_ratelimit(pgdat, 1))
b32967ff 1735 goto out;
b32967ff
MG
1736
1737 isolated = numamigrate_isolate_page(pgdat, page);
1738 if (!isolated)
1739 goto out;
1740
1741 list_add(&page->lru, &migratepages);
9c620e2b 1742 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
68711a74
DR
1743 NULL, node, MIGRATE_ASYNC,
1744 MR_NUMA_MISPLACED);
b32967ff 1745 if (nr_remaining) {
59c82b70
JK
1746 if (!list_empty(&migratepages)) {
1747 list_del(&page->lru);
1748 dec_zone_page_state(page, NR_ISOLATED_ANON +
1749 page_is_file_cache(page));
1750 putback_lru_page(page);
1751 }
b32967ff
MG
1752 isolated = 0;
1753 } else
1754 count_vm_numa_event(NUMA_PAGE_MIGRATE);
7039e1db 1755 BUG_ON(!list_empty(&migratepages));
7039e1db 1756 return isolated;
340ef390
HD
1757
1758out:
1759 put_page(page);
1760 return 0;
7039e1db 1761}
220018d3 1762#endif /* CONFIG_NUMA_BALANCING */
b32967ff 1763
220018d3 1764#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
340ef390
HD
1765/*
1766 * Migrates a THP to a given target node. page must be locked and is unlocked
1767 * before returning.
1768 */
b32967ff
MG
1769int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1770 struct vm_area_struct *vma,
1771 pmd_t *pmd, pmd_t entry,
1772 unsigned long address,
1773 struct page *page, int node)
1774{
c4088ebd 1775 spinlock_t *ptl;
b32967ff
MG
1776 pg_data_t *pgdat = NODE_DATA(node);
1777 int isolated = 0;
1778 struct page *new_page = NULL;
b32967ff 1779 int page_lru = page_is_file_cache(page);
f714f4f2
MG
1780 unsigned long mmun_start = address & HPAGE_PMD_MASK;
1781 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
2b4847e7 1782 pmd_t orig_entry;
b32967ff 1783
b32967ff
MG
1784 /*
1785 * Rate-limit the amount of data that is being migrated to a node.
1786 * Optimal placement is no good if the memory bus is saturated and
1787 * all the time is being spent migrating!
1788 */
d28d4335 1789 if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
b32967ff
MG
1790 goto out_dropref;
1791
1792 new_page = alloc_pages_node(node,
71baba4b 1793 (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
e97ca8e5 1794 HPAGE_PMD_ORDER);
340ef390
HD
1795 if (!new_page)
1796 goto out_fail;
9a982250 1797 prep_transhuge_page(new_page);
340ef390 1798
b32967ff 1799 isolated = numamigrate_isolate_page(pgdat, page);
340ef390 1800 if (!isolated) {
b32967ff 1801 put_page(new_page);
340ef390 1802 goto out_fail;
b32967ff 1803 }
458aa76d
AK
1804 /*
1805 * We are not sure a pending tlb flush here is for a huge page
1806 * mapping or not. Hence use the tlb range variant
1807 */
b0943d61
MG
1808 if (mm_tlb_flush_pending(mm))
1809 flush_tlb_range(vma, mmun_start, mmun_end);
1810
b32967ff 1811 /* Prepare a page as a migration target */
48c935ad 1812 __SetPageLocked(new_page);
fa9949da 1813 __SetPageSwapBacked(new_page);
b32967ff
MG
1814
1815 /* anon mapping, we can simply copy page->mapping to the new page: */
1816 new_page->mapping = page->mapping;
1817 new_page->index = page->index;
1818 migrate_page_copy(new_page, page);
1819 WARN_ON(PageLRU(new_page));
1820
1821 /* Recheck the target PMD */
f714f4f2 1822 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
c4088ebd 1823 ptl = pmd_lock(mm, pmd);
2b4847e7
MG
1824 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1825fail_putback:
c4088ebd 1826 spin_unlock(ptl);
f714f4f2 1827 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
b32967ff
MG
1828
1829 /* Reverse changes made by migrate_page_copy() */
1830 if (TestClearPageActive(new_page))
1831 SetPageActive(page);
1832 if (TestClearPageUnevictable(new_page))
1833 SetPageUnevictable(page);
b32967ff
MG
1834
1835 unlock_page(new_page);
1836 put_page(new_page); /* Free it */
1837
a54a407f
MG
1838 /* Retake the callers reference and putback on LRU */
1839 get_page(page);
b32967ff 1840 putback_lru_page(page);
a54a407f
MG
1841 mod_zone_page_state(page_zone(page),
1842 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
eb4489f6
MG
1843
1844 goto out_unlock;
b32967ff
MG
1845 }
1846
2b4847e7 1847 orig_entry = *pmd;
b32967ff 1848 entry = mk_pmd(new_page, vma->vm_page_prot);
b32967ff 1849 entry = pmd_mkhuge(entry);
2b4847e7 1850 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
b32967ff 1851
2b4847e7
MG
1852 /*
1853 * Clear the old entry under pagetable lock and establish the new PTE.
1854 * Any parallel GUP will either observe the old page blocking on the
1855 * page lock, block on the page table lock or observe the new page.
1856 * The SetPageUptodate on the new page and page_add_new_anon_rmap
1857 * guarantee the copy is visible before the pagetable update.
1858 */
f714f4f2 1859 flush_cache_range(vma, mmun_start, mmun_end);
d281ee61 1860 page_add_anon_rmap(new_page, vma, mmun_start, true);
8809aa2d 1861 pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
f714f4f2 1862 set_pmd_at(mm, mmun_start, pmd, entry);
ce4a9cc5 1863 update_mmu_cache_pmd(vma, address, &entry);
2b4847e7
MG
1864
1865 if (page_count(page) != 2) {
f714f4f2 1866 set_pmd_at(mm, mmun_start, pmd, orig_entry);
458aa76d 1867 flush_pmd_tlb_range(vma, mmun_start, mmun_end);
34ee645e 1868 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
2b4847e7 1869 update_mmu_cache_pmd(vma, address, &entry);
d281ee61 1870 page_remove_rmap(new_page, true);
2b4847e7
MG
1871 goto fail_putback;
1872 }
1873
51afb12b 1874 mlock_migrate_page(new_page, page);
d281ee61 1875 page_remove_rmap(page, true);
7cd12b4a 1876 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2b4847e7 1877
c4088ebd 1878 spin_unlock(ptl);
f714f4f2 1879 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
b32967ff 1880
11de9927
MG
1881 /* Take an "isolate" reference and put new page on the LRU. */
1882 get_page(new_page);
1883 putback_lru_page(new_page);
1884
b32967ff
MG
1885 unlock_page(new_page);
1886 unlock_page(page);
1887 put_page(page); /* Drop the rmap reference */
1888 put_page(page); /* Drop the LRU isolation reference */
1889
1890 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
1891 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
1892
b32967ff
MG
1893 mod_zone_page_state(page_zone(page),
1894 NR_ISOLATED_ANON + page_lru,
1895 -HPAGE_PMD_NR);
1896 return isolated;
1897
340ef390
HD
1898out_fail:
1899 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
b32967ff 1900out_dropref:
2b4847e7
MG
1901 ptl = pmd_lock(mm, pmd);
1902 if (pmd_same(*pmd, entry)) {
4d942466 1903 entry = pmd_modify(entry, vma->vm_page_prot);
f714f4f2 1904 set_pmd_at(mm, mmun_start, pmd, entry);
2b4847e7
MG
1905 update_mmu_cache_pmd(vma, address, &entry);
1906 }
1907 spin_unlock(ptl);
a54a407f 1908
eb4489f6 1909out_unlock:
340ef390 1910 unlock_page(page);
b32967ff 1911 put_page(page);
b32967ff
MG
1912 return 0;
1913}
7039e1db
PZ
1914#endif /* CONFIG_NUMA_BALANCING */
1915
1916#endif /* CONFIG_NUMA */