mm/readahead.c, mm/vmscan.c: use lru_to_page instead of list_to_page
author     Geliang Tang <geliangtang@163.com>
           Thu, 14 Jan 2016 23:20:51 +0000 (15:20 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 15 Jan 2016 00:00:49 +0000 (16:00 -0800)
list_to_page() in readahead.c is identical to lru_to_page() in vmscan.c,
so move lru_to_page() to mm/internal.h and drop list_to_page().
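For reference, this is the pattern every converted call site follows; the
sketch below is illustrative only (drain_page_list() is a hypothetical
helper, not part of this patch) and assumes the macro is visible via
#include "internal.h" inside mm/:

	static void drain_page_list(struct list_head *pages)
	{
		while (!list_empty(pages)) {
			/* lru_to_page() returns the page linked at the
			 * tail of the list through page->lru */
			struct page *page = lru_to_page(pages);

			list_del(&page->lru);	/* unlink before use */
			/* ... hand the page to the caller-specific step ... */
		}
	}

Keeping a single definition in mm/internal.h puts readahead and reclaim on
the same tail-of-list convention instead of two identical private macros.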

Signed-off-by: Geliang Tang <geliangtang@163.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/internal.h
mm/readahead.c
mm/vmscan.c

diff --git a/mm/internal.h b/mm/internal.h
index 38e24b89e4c400394212941a1789dd75bb902198..016452d2fede4623bb9a7be2361de199e8588ada 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -119,6 +119,8 @@ extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
 extern bool zone_reclaimable(struct zone *zone);
 
+#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
+
 /*
  * in mm/rmap.c:
  */
diff --git a/mm/readahead.c b/mm/readahead.c
index ba22d7fe0afbae6e8e1568a9564b6dd6399ca964..0aff760b09d4a93bf8d6f292de434e4d574fce79 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -32,8 +32,6 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
-#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
-
 /*
  * see if a page needs releasing upon read_cache_pages() failure
  * - the caller of read_cache_pages() may have set PG_private or PG_fscache
@@ -64,7 +62,7 @@ static void read_cache_pages_invalidate_pages(struct address_space *mapping,
        struct page *victim;
 
        while (!list_empty(pages)) {
-               victim = list_to_page(pages);
+               victim = lru_to_page(pages);
                list_del(&victim->lru);
                read_cache_pages_invalidate_page(mapping, victim);
        }
@@ -87,7 +85,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
        int ret = 0;
 
        while (!list_empty(pages)) {
-               page = list_to_page(pages);
+               page = lru_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping, page->index,
                                mapping_gfp_constraint(mapping, GFP_KERNEL))) {
@@ -125,7 +123,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
        }
 
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-               struct page *page = list_to_page(pages);
+               struct page *page = lru_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping, page->index,
                                mapping_gfp_constraint(mapping, GFP_KERNEL))) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1bbfd623630ebdc8966e0617ceaeef6a1fe969f9..e36d766dade97e668f8987c5c7a80b2f3af02fec 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -106,8 +106,6 @@ struct scan_control {
        unsigned long nr_reclaimed;
 };
 
-#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
-
 #ifdef ARCH_HAS_PREFETCH
 #define prefetch_prev_lru_page(_page, _base, _field)                   \
        do {                                                            \