/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 */
#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}
/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 *
 * Returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;
        int count = 0;

        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
                putback_lru_page(page);
                count++;
        }
        return count;
}
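/*
 * Example, for illustration only: a typical migration caller drains the
 * per-cpu LRU pagevecs with migrate_prep(), isolates each candidate page
 * with isolate_lru_page(), collects the pages on a private list and hands
 * that list to migrate_pages(). Pages that could not be migrated are put
 * back by migrate_pages() itself via putback_lru_pages(). The allocator
 * callback name (alloc_target_page) and target_nid are placeholders.
 *
 *      LIST_HEAD(pagelist);
 *
 *      migrate_prep();
 *      if (!isolate_lru_page(page))
 *              list_add_tail(&page->lru, &pagelist);
 *
 *      if (!list_empty(&pagelist))
 *              err = migrate_pages(&pagelist, alloc_target_page,
 *                                      (unsigned long)target_nid);
 */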
/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
                struct page *old, struct page *new)
{
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        unsigned long addr = page_address_in_vma(new, vma);

        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return;

        ptep = pte_offset_map(pmd, addr);

        if (!is_swap_pte(*ptep)) {
                pte_unmap(ptep);
                return;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);

        if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
                goto out;

        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
        flush_cache_page(vma, addr, pte_pfn(pte));
        set_pte_at(mm, addr, ptep, pte);

        if (PageAnon(new))
                page_add_anon_rmap(new, vma, addr);
        else
                page_add_file_rmap(new);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, pte);

out:
        pte_unmap_unlock(ptep, ptl);
}
/*
 * Note that remove_file_migration_ptes will only work on regular mappings;
 * nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
        struct vm_area_struct *vma;
        struct address_space *mapping = new->mapping;
        struct prio_tree_iter iter;
        pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        spin_lock(&mapping->i_mmap_lock);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
                remove_migration_pte(vma, old, new);

        spin_unlock(&mapping->i_mmap_lock);
}
/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        unsigned long mapping;

        mapping = (unsigned long)new->mapping;

        if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
                return;

        /*
         * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
         */
        anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
                remove_migration_pte(vma, old, new);

        spin_unlock(&anon_vma->lock);
}
/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
        if (PageAnon(new))
                remove_anon_migration_ptes(old, new);
        else
                remove_file_migration_ptes(old, new);
}
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
{
        pte_t *ptep, pte;
        spinlock_t *ptl;
        swp_entry_t entry;
        struct page *page;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        page = migration_entry_to_page(entry);

        /*
         * Once radix-tree replacement of page migration started, page_count
         * *must* be zero. And, we don't want to call wait_on_page_locked()
         * against a page without get_page().
         * So, we use get_page_unless_zero(), here. Even failed, page fault
         * will occur again.
         */
        if (!get_page_unless_zero(page))
                goto out;
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
        return;
out:
        pte_unmap_unlock(ptep, ptl);
}
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int expected_count;
        void **pslot;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return 0;
        }

        spin_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        expected_count = 2 + !!page_has_private(page);
        if (page_count(page) != expected_count ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        if (!page_freeze_refs(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);      /* add cache reference */
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }

        radix_tree_replace_slot(pslot, newpage);

        page_unfreeze_refs(page, expected_count);
        /*
         * Drop cache reference from old page.
         * We know this isn't the last reference.
         */
        __put_page(page);

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_PAGES if they
         * are mapped to swap space.
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);
        if (PageSwapBacked(page)) {
                __dec_zone_page_state(page, NR_SHMEM);
                __inc_zone_page_state(newpage, NR_SHMEM);
        }
        spin_unlock_irq(&mapping->tree_lock);

        return 0;
}
/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
        int anon;

        copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                SetPageActive(newpage);
        } else
                unevictable_migrate_page(newpage, page);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
                /*
                 * Want to mark the page and the radix tree as dirty, and
                 * redo the accounting that clear_page_dirty_for_io undid,
                 * but we can't use set_page_dirty because that function
                 * is actually a signal that all of the page has become dirty.
                 * Whereas only part of our page may be dirty.
                 */
                __set_page_dirty_nobuffers(newpage);
        }

        mlock_migrate_page(newpage, page);

        ClearPageSwapCache(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);
        /* page->mapping contains a flag for PageAnon() */
        anon = PageAnon(page);
        page->mapping = NULL;

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
}
/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
                        struct page *newpage, struct page *page)
{
        return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);
/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        migrate_page_copy(newpage, page);
        return 0;
}
EXPORT_SYMBOL(migrate_page);
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        struct buffer_head *bh, *head;
        int rc;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page);

        head = page_buffers(page);

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        bh = head;
        do {
                get_bh(bh);
                lock_buffer(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
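/*
 * For illustration: filesystems opt into one of the helpers above through
 * the migratepage member of their struct address_space_operations, which
 * move_to_new_page() below invokes via mapping->a_ops->migratepage. A
 * buffer_head based filesystem would typically wire it up roughly like
 * the sketch below (example_readpage/example_writepage are placeholder
 * names):
 *
 *      static const struct address_space_operations example_aops = {
 *              .readpage       = example_readpage,
 *              .writepage      = example_writepage,
 *              .migratepage    = buffer_migrate_page,
 *      };
 *
 * Mappings whose pages must never move can point the same hook at
 * fail_migrate_page() instead.
 */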
/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .range_end = LLONG_MAX,
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!clear_page_dirty_for_io(page))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page);

        rc = mapping->a_ops->writepage(page, &wbc);

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);

        return (rc < 0) ? -EIO : -EAGAIN;
}
/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
        struct page *newpage, struct page *page)
{
        if (PageDirty(page))
                return writeout(mapping, page);

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;

        return migrate_page(mapping, newpage, page);
}
/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
        struct address_space *mapping;
        int rc;

        /*
         * Block others from accessing the page when we get around to
         * establishing additional references. We are the only one
         * holding a reference to the new page at this point.
         */
        if (!trylock_page(newpage))
                BUG();

        /* Prepare mapping for the new page. */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
                SetPageSwapBacked(newpage);

        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page);
        else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems
                 * should provide a migration function. Anonymous
                 * pages are part of swap space which also has its
                 * own migration function. This is the most common
                 * path for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping,
                                                newpage, page);
        else
                rc = fallback_migrate_page(mapping, newpage, page);

        if (!rc)
                remove_migration_ptes(page, newpage);
        else
                newpage->mapping = NULL;

        unlock_page(newpage);

        return rc;
}
/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        struct page *page, int force)
{
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
        int charge = 0;
        struct mem_cgroup *mem;

        if (!newpage)
                return -ENOMEM;

        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                goto move_newpage;
        }

        /* prepare cgroup just returns 0 or -ENOMEM */
        rc = -EAGAIN;

        if (!trylock_page(page)) {
                if (!force)
                        goto move_newpage;
                lock_page(page);
        }

        /* charge against new page */
        charge = mem_cgroup_prepare_migration(page, &mem);
        if (charge == -ENOMEM) {
                rc = -ENOMEM;
                goto unlock;
        }

        if (PageWriteback(page)) {
                if (!force)
                        goto uncharge;
                wait_on_page_writeback(page);
        }
        /*
         * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
         * we cannot notice that anon_vma is freed while we migrate a page.
         * This rcu_read_lock() delays freeing the anon_vma pointer until the
         * end of migration. File cache pages are no problem because of
         * page_lock(): file caches may use writepage() or lock_page() during
         * migration, so only anonymous pages need this care.
         */
        if (PageAnon(page)) {
                rcu_read_lock();
                rcu_locked = 1;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read in, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a page->mapping==NULL page will
         * trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_complete_page) might have
         * fs-private metadata. The page can be picked up due to memory
         * offlining. Everywhere else except page reclaim, the page is
         * invisible to the vm, so the page cannot be migrated. So try to
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
                if (!PageAnon(page) && page_has_private(page)) {
                        /*
                         * Go direct to try_to_free_buffers() here because
                         * a) that's what try_to_release_page() would do anyway
                         * b) we may be under rcu_read_lock() here, so we can't
                         *    use GFP_KERNEL which is what try_to_release_page()
                         *    needs to be effective.
                         */
                        try_to_free_buffers(page);
                        goto rcu_unlock;
                }
                goto skip_unmap;
        }

        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, 1);

skip_unmap:
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);

        if (rc)
                remove_migration_ptes(page, page);
rcu_unlock:
        if (rcu_locked)
                rcu_read_unlock();
uncharge:
        if (!charge)
                mem_cgroup_end_migration(mem, page, newpage);
unlock:
        unlock_page(page);

        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
                 * removed and will be freed. A page that has not been
                 * migrated will have kept its references and be restored.
                 */
                list_del(&page->lru);
                putback_lru_page(page);
        }

move_newpage:
        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
        putback_lru_page(newpage);

        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(newpage);
        }
        return rc;
}
/*
 * The function takes one list of pages to migrate and a function
 * that, given a page to be migrated and the private data, determines
 * the target of the move and allocates the new page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private)
{
        int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

        for(pass = 0; pass < 10 && retry; pass++) {
                retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
                        cond_resched();

                        rc = unmap_and_move(get_new_page, private,
                                                page, pass > 2);

                        switch(rc) {
                        case -ENOMEM:
                                goto out;
                        case -EAGAIN:
                                retry++;
                                break;
                        case 0:
                                break;
                        default:
                                /* Permanent failure */
                                nr_failed++;
                                break;
                        }
                }
        }
        rc = 0;
out:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        putback_lru_pages(from);

        if (rc)
                return rc;

        return nr_failed + retry;
}
/*
 * Move a list of individual pages
 */
struct page_to_node {
        unsigned long addr;
        struct page *page;
        int node;
        int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
                int **result)
{
        struct page_to_node *pm = (struct page_to_node *)private;

        while (pm->node != MAX_NUMNODES && pm->page != p)
                pm++;

        if (pm->node == MAX_NUMNODES)
                return NULL;

        *result = &pm->status;

        return alloc_pages_exact_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}
/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
                                      struct page_to_node *pm,
                                      int migrate_all)
{
        int err;
        struct page_to_node *pp;
        LIST_HEAD(pagelist);

        down_read(&mm->mmap_sem);

        /*
         * Build a list of pages to migrate
         */
        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
                struct vm_area_struct *vma;
                struct page *page;

                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
                if (!vma || !vma_migratable(vma))
                        goto set_status;

                page = follow_page(vma, pp->addr, FOLL_GET);

                err = -ENOENT;
                if (!page)
                        goto set_status;

                if (PageReserved(page))         /* Check for zero page */
                        goto put_and_set;

                pp->page = page;
                err = page_to_nid(page);

                if (err == pp->node)
                        /*
                         * Node already in the right place
                         */
                        goto put_and_set;

                err = -EACCES;
                if (page_mapcount(page) > 1 &&
                                !migrate_all)
                        goto put_and_set;

                err = isolate_lru_page(page);
                if (!err)
                        list_add_tail(&page->lru, &pagelist);
put_and_set:
                /*
                 * Either remove the duplicate refcount from
                 * isolate_lru_page() or drop the page ref if it was
                 * not isolated.
                 */
                put_page(page);
set_status:
                pp->status = err;
        }

        err = 0;
        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_page_node,
                                (unsigned long)pm);

        up_read(&mm->mmap_sem);
        return err;
}
/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
                         unsigned long nr_pages,
                         const void __user * __user *pages,
                         const int __user *nodes,
                         int __user *status, int flags)
{
        struct page_to_node *pm;
        nodemask_t task_nodes;
        unsigned long chunk_nr_pages;
        unsigned long chunk_start;
        int err;

        task_nodes = cpuset_mems_allowed(task);

        err = -ENOMEM;
        pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
        if (!pm)
                goto out;

        /*
         * Store a chunk of page_to_node array in a page,
         * but keep the last one as a marker
         */
        chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

        for (chunk_start = 0;
             chunk_start < nr_pages;
             chunk_start += chunk_nr_pages) {
                int j;

                if (chunk_start + chunk_nr_pages > nr_pages)
                        chunk_nr_pages = nr_pages - chunk_start;

                /* fill the chunk pm with addrs and nodes from user-space */
                for (j = 0; j < chunk_nr_pages; j++) {
                        const void __user *p;
                        int node;

                        err = -EFAULT;
                        if (get_user(p, pages + j + chunk_start))
                                goto out_pm;
                        pm[j].addr = (unsigned long) p;

                        if (get_user(node, nodes + j + chunk_start))
                                goto out_pm;

                        err = -ENODEV;
                        if (!node_state(node, N_HIGH_MEMORY))
                                goto out_pm;

                        err = -EACCES;
                        if (!node_isset(node, task_nodes))
                                goto out_pm;

                        pm[j].node = node;
                }

                /* End marker for this chunk */
                pm[chunk_nr_pages].node = MAX_NUMNODES;

                /* Migrate this chunk */
                err = do_move_page_to_node_array(mm, pm,
                                                 flags & MPOL_MF_MOVE_ALL);
                if (err < 0)
                        goto out_pm;

                /* Return status information */
                for (j = 0; j < chunk_nr_pages; j++)
                        if (put_user(pm[j].status, status + j + chunk_start)) {
                                err = -EFAULT;
                                goto out_pm;
                        }
        }
        err = 0;

out_pm:
        free_page((unsigned long)pm);
out:
        return err;
}
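/*
 * Worked example, for illustration: with 4KiB pages, the four fields used
 * here (addr, page, node, status) make sizeof(struct page_to_node) 24
 * bytes on a typical 64-bit build, so PAGE_SIZE / sizeof(struct
 * page_to_node) is 170 and chunk_nr_pages is 169; the last slot of the
 * scratch page is kept free for the MAX_NUMNODES end marker.
 */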
/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
                                const void __user **pages, int *status)
{
        unsigned long i;

        down_read(&mm->mmap_sem);

        for (i = 0; i < nr_pages; i++) {
                unsigned long addr = (unsigned long)(*pages);
                struct vm_area_struct *vma;
                struct page *page;
                int err = -EFAULT;

                vma = find_vma(mm, addr);
                if (!vma)
                        goto set_status;

                page = follow_page(vma, addr, 0);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                /* Use PageReserved to check for zero page */
                if (!page || PageReserved(page))
                        goto set_status;

                err = page_to_nid(page);
set_status:
                *status = err;

                pages++;
                status++;
        }

        up_read(&mm->mmap_sem);
}
/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
                         const void __user * __user *pages,
                         int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
        const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
        int chunk_status[DO_PAGES_STAT_CHUNK_NR];
        unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
        int err;

        for (i = 0; i < nr_pages; i += chunk_nr) {
                if (chunk_nr + i > nr_pages)
                        chunk_nr = nr_pages - i;

                err = copy_from_user(chunk_pages, &pages[i],
                                     chunk_nr * sizeof(*chunk_pages));
                if (err)
                        break;

                do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

                err = copy_to_user(&status[i], chunk_status,
                                   chunk_nr * sizeof(*chunk_status));
                if (err)
                        break;
        }
        return err;
}
/*
 * Move a list of pages in the address space of the currently executing
 * task.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
                const void __user * __user *, pages,
                const int __user *, nodes,
                int __user *, status, int, flags)
{
        const struct cred *cred = current_cred(), *tcred;
        struct task_struct *task;
        struct mm_struct *mm;
        int err;

        /* Check flags */
        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
                return -EINVAL;

        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        /* Find the mm_struct */
        read_lock(&tasklist_lock);
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                read_unlock(&tasklist_lock);
                return -ESRCH;
        }
        mm = get_task_mm(task);
        read_unlock(&tasklist_lock);

        if (!mm)
                return -EINVAL;

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        tcred = __task_cred(task);
        if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
            cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
            !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out;
        }

        err = security_task_movememory(task);
        if (err)
                goto out;

        if (nodes) {
                err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
                                    flags);
        } else {
                err = do_pages_stat(mm, nr_pages, pages, status);
        }

out:
        mmput(mm);
        return err;
}
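/*
 * For illustration: from user space this system call is normally reached
 * through the libnuma wrapper declared in <numaif.h>. A sketch that moves
 * one page of process "pid" to node 1 and reads back its status (addr is
 * a placeholder for an address in that process, error handling omitted):
 *
 *      #include <numaif.h>
 *
 *      void *pages[1] = { addr };
 *      int nodes[1] = { 1 };
 *      int status[1];
 *      long rc;
 *
 *      rc = move_pages(pid, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * Passing nodes == NULL turns the call into a pure query: do_pages_stat()
 * above then fills status[] with the node each page currently resides on.
 */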
/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        const nodemask_t *from, unsigned long flags)
{
        struct vm_area_struct *vma;
        int err = 0;

        for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->migrate) {
                        err = vma->vm_ops->migrate(vma, to, from, flags);
                        if (err)
                                break;
                }
        }
        return err;
}