munlock_vma_pages_range() was always incrementing addresses by PAGE_SIZE
at a time. When munlocking THP pages (or the huge zero page), this
resulted in taking the mm->page_table_lock 512 times in a row.
We can do better by making use of the page_mask returned by
follow_page_mask (for the huge zero page case), or the size of the page
munlock_vma_page() operated on (for the true THP page case).
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
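To see why this cuts the lock traffic, consider the arithmetic the patch
introduces: follow_page_mask() (or munlock_vma_page()) reports the mask of
the page just processed, and the loop advances past that whole page in one
step. Below is a minimal userspace sketch of that loop; PAGE_SHIFT, the 2M
THP geometry, and fake_page_mask() are illustrative stand-ins for the
kernel's helpers, not actual kernel code.

    /* Userspace model; assumes x86-64 geometry: 4K pages, 2M THP. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define THP_MASK   511 /* a 2M THP spans 512 small pages */

    /* pretend [0x200000, 0x400000) is backed by a single 2M THP */
    static unsigned int fake_page_mask(unsigned long addr)
    {
            return (addr >= 0x200000 && addr < 0x400000) ? THP_MASK : 0;
    }

    int main(void)
    {
            unsigned long start = 0x200000, end = 0x400000;
            unsigned int iterations = 0;

            while (start < end) {
                    unsigned int page_mask = fake_page_mask(start);
                    unsigned int page_increm =
                            1 + (~(start >> PAGE_SHIFT) & page_mask);

                    start += page_increm * PAGE_SIZE;
                    iterations++;
            }
            /* the old one-PAGE_SIZE-at-a-time loop: 512 iterations;
             * the new mask-driven loop: 1 */
            printf("%u iteration(s)\n", iterations);
            return 0;
    }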
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
-extern void munlock_vma_page(struct page *page);
+extern unsigned int munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked(). This can be useful in a situation where

  * can't isolate the page, we leave it for putback_lru_page() and vmscan
  * [page_referenced()/try_to_unmap()] to deal with.
  */
-void munlock_vma_page(struct page *page)
+unsigned int munlock_vma_page(struct page *page)
 {
+        unsigned int page_mask = 0;
+
         BUG_ON(!PageLocked(page));
 
         if (TestClearPageMlocked(page)) {
-                mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                    -hpage_nr_pages(page));
+                unsigned int nr_pages = hpage_nr_pages(page);
+                mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+                page_mask = nr_pages - 1;
                 if (!isolate_lru_page(page)) {
                         int ret = SWAP_AGAIN;

                         count_vm_event(UNEVICTABLE_PGMUNLOCKED);
                 }
         }
+
+        return page_mask;
 }
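As a hedged model of the new return contract (sizes assumed: 4K base pages
and 2M THPs, so hpage_nr_pages() is 1 or 512), munlock_vma_page() now hands
back the mask of the page it operated on:

    #include <stdio.h>

    /* Model only: illustrative sizes, not kernel code. */
    static unsigned int model_munlock_return(int is_thp)
    {
            unsigned int nr_pages = is_thp ? 512 : 1; /* hpage_nr_pages(page) */
            return nr_pages - 1;                      /* page_mask: 0 or 511 */
    }

    int main(void)
    {
            printf("small page mask: %u\n", model_munlock_return(0)); /* 0 */
            printf("THP mask:        %u\n", model_munlock_return(1)); /* 511 */
            return 0;
    }

Returning nr_pages - 1 rather than nr_pages keeps the value directly
interchangeable with the page_mask that follow_page_mask() reports, so the
caller can feed either into the same increment formula.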
 long __mlock_vma_pages_range(struct vm_area_struct *vma,
                 unsigned long start, unsigned long end, int *nonblocking)
 {
         struct mm_struct *mm = vma->vm_mm;
-        unsigned long addr = start;
         unsigned long nr_pages = (end - start) / PAGE_SIZE;
         int gup_flags;

          * We made sure addr is within a VMA, so the following will
          * not result in a stack expansion that recurses back here.
          */
-        return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+        return __get_user_pages(current, mm, start, nr_pages, gup_flags,
                                 NULL, NULL, nonblocking);
 }
 void munlock_vma_pages_range(struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
 {
-        unsigned long addr;
-
-        lru_add_drain();
         vma->vm_flags &= ~VM_LOCKED;
 
-        for (addr = start; addr < end; addr += PAGE_SIZE) {
+        while (start < end) {
                 struct page *page;
+                unsigned int page_mask, page_increm;
+
                 /*
                  * Although FOLL_DUMP is intended for get_dump_page(),
                  * it just so happens that its special treatment of the
                  * ZERO_PAGE (returning an error instead of doing get_page)
                  * suits munlock very well (and if somehow an abnormal page
                  * has sneaked into the range, we won't oops here: great).
                  */
-                page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+                page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
+                                        &page_mask);
                 if (page && !IS_ERR(page)) {
                         lock_page(page);
-                        munlock_vma_page(page);
+                        lru_add_drain();
+                        /*
+                         * Any THP page found by follow_page_mask() may have
+                         * gotten split before reaching munlock_vma_page(),
+                         * so we need to recompute the page_mask here.
+                         */
+                        page_mask = munlock_vma_page(page);
                         unlock_page(page);
                         put_page(page);
                 }
+                page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+                start += page_increm * PAGE_SIZE;
                 cond_resched();
         }
 }
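The increment formula is worth a worked check: with page_mask 0 it
degenerates to the old one-page step, and from any address inside a huge
page it advances exactly to the huge page's end, never past it. A small
self-test under the same illustrative geometry (4K pages, 2M THP) as the
sketch above:

    #include <assert.h>

    #define PAGE_SHIFT 12 /* illustrative: 4K base pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* model of the loop's address-advance computation */
    static unsigned long next_addr(unsigned long start, unsigned int page_mask)
    {
            unsigned int page_increm =
                    1 + (~(start >> PAGE_SHIFT) & page_mask);
            return start + page_increm * PAGE_SIZE;
    }

    int main(void)
    {
            /* normal page: mask 0 gives the old PAGE_SIZE step */
            assert(next_addr(0x1000, 0) == 0x2000);
            /* aligned 2M THP (mask 511): whole page in one step */
            assert(next_addr(0x200000, 511) == 0x400000);
            /* start in the middle of a THP still lands on its end */
            assert(next_addr(0x205000, 511) == 0x400000);
            return 0;
    }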