Merge tag 'v3.10.103' into update
diff --git a/mm/migrate.c b/mm/migrate.c
index 6f0c24438bbaaf6ffdaa4f840ab8da37cf9e236a..808f8abb1b8ff04aa322ca74829661f999f79390 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -30,6 +30,7 @@
 #include <linux/mempolicy.h>
 #include <linux/vmalloc.h>
 #include <linux/security.h>
+#include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 #include <linux/syscalls.h>
 #include <linux/hugetlb.h>
@@ -103,7 +104,7 @@ void putback_movable_pages(struct list_head *l)
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-               if (unlikely(balloon_page_movable(page)))
+               if (unlikely(isolated_balloon_page(page)))
                        balloon_page_putback(page);
                else
                        putback_lru_page(page);
@@ -307,10 +308,12 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
-static int migrate_page_move_mapping(struct address_space *mapping,
+int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
                struct buffer_head *head, enum migrate_mode mode)
 {
+       struct zone *oldzone, *newzone;
+       int dirty;
        int expected_count = 0;
        void **pslot;
 
@@ -321,6 +324,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
                return MIGRATEPAGE_SUCCESS;
        }
 
+       oldzone = page_zone(page);
+       newzone = page_zone(newpage);
+
        spin_lock_irq(&mapping->tree_lock);
 
        pslot = radix_tree_lookup_slot(&mapping->page_tree,
@@ -361,6 +367,13 @@ static int migrate_page_move_mapping(struct address_space *mapping,
                set_page_private(newpage, page_private(page));
        }
 
+       /* Move dirty while page refs frozen and newpage not yet exposed */
+       dirty = PageDirty(page);
+       if (dirty) {
+               ClearPageDirty(page);
+               SetPageDirty(newpage);
+       }
+
        radix_tree_replace_slot(pslot, newpage);
 
        /*
@@ -370,6 +383,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         */
        page_unfreeze_refs(page, expected_count - 1);
 
+       spin_unlock(&mapping->tree_lock);
+       /* Leave irq disabled to prevent preemption while updating stats */
+
        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
@@ -380,16 +396,23 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         * via NR_FILE_PAGES and NR_ANON_PAGES if they
         * are mapped to swap space.
         */
-       __dec_zone_page_state(page, NR_FILE_PAGES);
-       __inc_zone_page_state(newpage, NR_FILE_PAGES);
-       if (!PageSwapCache(page) && PageSwapBacked(page)) {
-               __dec_zone_page_state(page, NR_SHMEM);
-               __inc_zone_page_state(newpage, NR_SHMEM);
+       if (newzone != oldzone) {
+               __dec_zone_state(oldzone, NR_FILE_PAGES);
+               __inc_zone_state(newzone, NR_FILE_PAGES);
+               if (PageSwapBacked(page) && !PageSwapCache(page)) {
+                       __dec_zone_state(oldzone, NR_SHMEM);
+                       __inc_zone_state(newzone, NR_SHMEM);
+               }
+               if (dirty && mapping_cap_account_dirty(mapping)) {
+                       __dec_zone_state(oldzone, NR_FILE_DIRTY);
+                       __inc_zone_state(newzone, NR_FILE_DIRTY);
+               }
        }
-       spin_unlock_irq(&mapping->tree_lock);
+       local_irq_enable();
 
        return MIGRATEPAGE_SUCCESS;
 }
+EXPORT_SYMBOL(migrate_page_move_mapping);
 
 /*
  * The expected number of remaining references is the same as that
@@ -460,20 +483,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);
 
-       if (PageDirty(page)) {
-               clear_page_dirty_for_io(page);
-               /*
-                * Want to mark the page and the radix tree as dirty, and
-                * redo the accounting that clear_page_dirty_for_io undid,
-                * but we can't use set_page_dirty because that function
-                * is actually a signal that all of the page has become dirty.
-                * Whereas only part of our page may be dirty.
-                */
-               if (PageSwapBacked(page))
-                       SetPageDirty(newpage);
-               else
-                       __set_page_dirty_nobuffers(newpage);
-       }
+       /* Move dirty on pages not done by migrate_page_move_mapping() */
+       if (PageDirty(page))
+               SetPageDirty(newpage);
 
        mlock_migrate_page(newpage, page);
        ksm_migrate_page(newpage, page);
@@ -492,6 +504,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
 }
+EXPORT_SYMBOL(migrate_page_copy);
 
 /************************************************************
  *                    Migration functions
@@ -1710,12 +1723,13 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                unlock_page(new_page);
                put_page(new_page);             /* Free it */
 
-               unlock_page(page);
+               /* Retake the caller's reference and putback on LRU */
+               get_page(page);
                putback_lru_page(page);
+               mod_zone_page_state(page_zone(page),
+                        NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
 
-               count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-               isolated = 0;
-               goto out;
+               goto out_unlock;
        }
 
        /*
@@ -1732,9 +1746,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
        entry = pmd_mkhuge(entry);
 
-       page_add_new_anon_rmap(new_page, vma, haddr);
-
+       pmdp_clear_flush(vma, haddr, pmd);
        set_pmd_at(mm, haddr, pmd, entry);
+       page_add_new_anon_rmap(new_page, vma, haddr);
        update_mmu_cache_pmd(vma, address, &entry);
        page_remove_rmap(page);
        /*
@@ -1753,7 +1767,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
        count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
 
-out:
        mod_zone_page_state(page_zone(page),
                        NR_ISOLATED_ANON + page_lru,
                        -HPAGE_PMD_NR);
@@ -1762,6 +1775,11 @@ out:
 out_fail:
        count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 out_dropref:
+       entry = pmd_mknonnuma(entry);
+       set_pmd_at(mm, haddr, pmd, entry);
+       update_mmu_cache_pmd(vma, address, &entry);
+
+out_unlock:
        unlock_page(page);
        put_page(page);
        return 0;