Merge branch 'msm-fix' of git://codeaurora.org/quic/kernel/davidb/linux-msm into...
index a91caf754d9badb5f0b1f3c54b8e86be4723ff84..b0f529b38979447d0ec192f8b6434af5bab28e02 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/mm_inline.h>
-#include <linux/buffer_head.h> /* for try_to_release_page() */
 #include <linux/percpu_counter.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
@@ -54,7 +53,7 @@ static void __page_cache_release(struct page *page)
                spin_lock_irqsave(&zone->lru_lock, flags);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
-               del_page_from_lru(zone, page);
+               del_page_from_lru_list(zone, page, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
 }
@@ -232,12 +231,14 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 static void pagevec_move_tail_fn(struct page *page, void *arg)
 {
        int *pgmoved = arg;
-       struct zone *zone = page_zone(page);
 
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
-               list_move_tail(&page->lru, &zone->lru[lru].list);
-               mem_cgroup_rotate_reclaimable_page(page);
+               struct lruvec *lruvec;
+
+               lruvec = mem_cgroup_lru_move_lists(page_zone(page),
+                                                  page, lru, lru);
+               list_move_tail(&page->lru, &lruvec->lists[lru]);
                (*pgmoved)++;
        }
 }
@@ -368,7 +369,6 @@ void mark_page_accessed(struct page *page)
                SetPageReferenced(page);
        }
 }
-
 EXPORT_SYMBOL(mark_page_accessed);
 
 void __lru_cache_add(struct page *page, enum lru_list lru)
@@ -377,7 +377,7 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 
        page_cache_get(page);
        if (!pagevec_add(pvec, page))
-               ____pagevec_lru_add(pvec, lru);
+               __pagevec_lru_add(pvec, lru);
        put_cpu_var(lru_add_pvecs);
 }
 EXPORT_SYMBOL(__lru_cache_add);
@@ -476,12 +476,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
                 */
                SetPageReclaim(page);
        } else {
+               struct lruvec *lruvec;
                /*
                 * The page's writeback has ended while it sat in the pagevec.
                 * Move the page to the tail of the inactive list.
                 */
-               list_move_tail(&page->lru, &zone->lru[lru].list);
-               mem_cgroup_rotate_reclaimable_page(page);
+               lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
+               list_move_tail(&page->lru, &lruvec->lists[lru]);
                __count_vm_event(PGROTATED);
        }
 
@@ -504,7 +505,7 @@ static void drain_cpu_pagevecs(int cpu)
        for_each_lru(lru) {
                pvec = &pvecs[lru - LRU_BASE];
                if (pagevec_count(pvec))
-                       ____pagevec_lru_add(pvec, lru);
+                       __pagevec_lru_add(pvec, lru);
        }
 
        pvec = &per_cpu(lru_rotate_pvecs, cpu);
@@ -585,11 +586,10 @@ int lru_add_drain_all(void)
 void release_pages(struct page **pages, int nr, int cold)
 {
        int i;
-       struct pagevec pages_to_free;
+       LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
        unsigned long uninitialized_var(flags);
 
-       pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
 
@@ -617,22 +617,15 @@ void release_pages(struct page **pages, int nr, int cold)
                        }
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
-                       del_page_from_lru(zone, page);
+                       del_page_from_lru_list(zone, page, page_off_lru(page));
                }
 
-               if (!pagevec_add(&pages_to_free, page)) {
-                       if (zone) {
-                               spin_unlock_irqrestore(&zone->lru_lock, flags);
-                               zone = NULL;
-                       }
-                       __pagevec_free(&pages_to_free);
-                       pagevec_reinit(&pages_to_free);
-               }
+               list_add(&page->lru, &pages_to_free);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-       pagevec_free(&pages_to_free);
+       free_hot_cold_page_list(&pages_to_free, cold);
 }
 EXPORT_SYMBOL(release_pages);
 
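For context (not part of the commit itself), here is a minimal sketch of the list_head batching pattern that release_pages() now uses instead of a temporary pagevec: pages whose refcount drops to zero are collected on a plain list and handed to free_hot_cold_page_list() in one call. The LRU locking and compound-page handling of the real release_pages() are omitted, and the function name example_batch_free is illustrative only.

#include <linux/mm.h>
#include <linux/list.h>
#include <linux/gfp.h>

static void example_batch_free(struct page **pages, int nr, int cold)
{
	LIST_HEAD(pages_to_free);	/* plain list, no PAGEVEC_SIZE limit */
	int i;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/* only queue pages whose last reference was just dropped */
		if (put_page_testzero(page))
			list_add(&page->lru, &pages_to_free);
	}

	/* one batched handoff to the page allocator's free lists */
	free_hot_cold_page_list(&pages_to_free, cold);
}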
@@ -652,9 +645,9 @@ void __pagevec_release(struct pagevec *pvec)
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
 }
-
 EXPORT_SYMBOL(__pagevec_release);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
 void lru_add_page_tail(struct zone* zone,
                       struct page *page, struct page *page_tail)
@@ -662,7 +655,6 @@ void lru_add_page_tail(struct zone* zone,
        int active;
        enum lru_list lru;
        const int file = 0;
-       struct list_head *head;
 
        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
@@ -681,18 +673,30 @@ void lru_add_page_tail(struct zone* zone,
                        lru = LRU_INACTIVE_ANON;
                }
                update_page_reclaim_stat(zone, page_tail, file, active);
-               if (likely(PageLRU(page)))
-                       head = page->lru.prev;
-               else
-                       head = &zone->lru[lru].list;
-               __add_page_to_lru_list(zone, page_tail, lru, head);
        } else {
                SetPageUnevictable(page_tail);
-               add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
+               lru = LRU_UNEVICTABLE;
+       }
+
+       if (likely(PageLRU(page)))
+               list_add_tail(&page_tail->lru, &page->lru);
+       else {
+               struct list_head *list_head;
+               /*
+                * Head page has not yet been counted, as an hpage,
+                * so we must account for each subpage individually.
+                *
+                * Use the standard add function to put page_tail on the list,
+                * but then correct its position so they all end up in order.
+                */
+               add_page_to_lru_list(zone, page_tail, lru);
+               list_head = page_tail->lru.prev;
+               list_move_tail(&page_tail->lru, list_head);
        }
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static void ____pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, void *arg)
 {
        enum lru_list lru = (enum lru_list)arg;
        struct zone *zone = page_zone(page);
@@ -714,32 +718,13 @@ static void ____pagevec_lru_add_fn(struct page *page, void *arg)
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them.  Reinitialises the caller's pagevec.
  */
-void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
+void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
        VM_BUG_ON(is_unevictable_lru(lru));
 
-       pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
-}
-
-EXPORT_SYMBOL(____pagevec_lru_add);
-
-/*
- * Try to drop buffers from the pages in a pagevec
- */
-void pagevec_strip(struct pagevec *pvec)
-{
-       int i;
-
-       for (i = 0; i < pagevec_count(pvec); i++) {
-               struct page *page = pvec->pages[i];
-
-               if (page_has_private(page) && trylock_page(page)) {
-                       if (page_has_private(page))
-                               try_to_release_page(page, 0);
-                       unlock_page(page);
-               }
-       }
+       pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
 }
+EXPORT_SYMBOL(__pagevec_lru_add);
 
 /**
  * pagevec_lookup - gang pagecache lookup
@@ -763,7 +748,6 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
 }
-
 EXPORT_SYMBOL(pagevec_lookup);
 
 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
@@ -773,7 +757,6 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                                        nr_pages, pvec->pages);
        return pagevec_count(pvec);
 }
-
 EXPORT_SYMBOL(pagevec_lookup_tag);
 
 /*