mm/mlock.c: use page_zone() instead of page_zone_id()
author: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Fri, 8 Sep 2017 23:12:59 +0000 (16:12 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Sat, 9 Sep 2017 01:26:47 +0000 (18:26 -0700)
page_zone_id() is a specialized function to compare the zone for the pages
that are within the section range.  If the sections of the pages are
different, page_zone_id() can differ even if their zone is the same.
This wrong usage doesn't cause any actual problem since
__munlock_pagevec_fill() would be called again with the failed index.
However, it's better to use more appropriate function here.

Link: http://lkml.kernel.org/r/1503559211-10259-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/mlock.c

index b562b5523a6544e6c0ae6e4f792943441f6217a3..dfc6f19121768ee85f269a8ddd1d5bc0f7a5b6fe 100644 (file)
@@ -365,8 +365,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
  * @start + PAGE_SIZE when no page could be added by the pte walk.
  */
 static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
-               struct vm_area_struct *vma, int zoneid, unsigned long start,
-               unsigned long end)
+                       struct vm_area_struct *vma, struct zone *zone,
+                       unsigned long start, unsigned long end)
 {
        pte_t *pte;
        spinlock_t *ptl;
@@ -394,7 +394,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
                 * Break if page could not be obtained or the page's node+zone does not
                 * match
                 */
-               if (!page || page_zone_id(page) != zoneid)
+               if (!page || page_zone(page) != zone)
                        break;
 
                /*
@@ -446,7 +446,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                unsigned long page_increm;
                struct pagevec pvec;
                struct zone *zone;
-               int zoneid;
 
                pagevec_init(&pvec, 0);
                /*
@@ -481,7 +480,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                                 */
                                pagevec_add(&pvec, page);
                                zone = page_zone(page);
-                               zoneid = page_zone_id(page);
 
                                /*
                                 * Try to fill the rest of pagevec using fast
@@ -490,7 +488,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                                 * pagevec.
                                 */
                                start = __munlock_pagevec_fill(&pvec, vma,
-                                               zoneid, start, end);
+                                               zone, start, end);
                                __munlock_pagevec(&pvec, zone);
                                goto next;
                        }