mm: Export migrate_page_move_mapping and migrate_page_copy
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4254eb0215838c50252670764f0648242e6fb066..37df20faddd59c87841ad6b7c27d92dbc3d87971 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -208,9 +208,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
 #endif
        si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
 
-       if ((flags & MF_ACTION_REQUIRED) && t == current) {
+       if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
                si.si_code = BUS_MCEERR_AR;
-               ret = force_sig_info(SIGBUS, &si, t);
+               ret = force_sig_info(SIGBUS, &si, current);
        } else {
                /*
                 * Don't use force here, it's convenient if the signal
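
The hunk above fixes signal delivery for action-required (AR) machine checks in multi-threaded processes: the task located by the early-kill search is not necessarily the thread that consumed the poison, so the AR case now matches any thread sharing current's address space and sends the signal to current itself. Userspace sees this as SIGBUS with si_code BUS_MCEERR_AR (or BUS_MCEERR_AO for the advisory case handled in the else branch). A minimal consumer sketch, not part of this patch, assuming a Linux toolchain that exposes the BUS_MCEERR_* constants:

/*
 * Sketch: a SIGBUS handler distinguishing the two si_code values
 * kill_proc() can deliver. Handler code is kept async-signal-safe.
 */
#define _GNU_SOURCE		/* for BUS_MCEERR_* on some libcs */
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void sigbus_handler(int sig, siginfo_t *info, void *uctx)
{
	(void)sig;
	(void)uctx;
	if (info->si_code == BUS_MCEERR_AR) {
		/*
		 * Action required: the access that hit the poison cannot
		 * be restarted. info->si_addr names the bad address and
		 * 1 << info->si_addr_lsb is the affected range.
		 */
		_exit(EXIT_FAILURE);
	}
	/* BUS_MCEERR_AO: advisory only; no poisoned data consumed yet. */
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction	= sigbus_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigemptyset(&sa.sa_mask);
	sigaction(SIGBUS, &sa, NULL);
	pause();	/* wait for a memory-failure event */
	return 0;
}
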
@@ -382,10 +382,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
        }
 }
 }
 
-static int task_early_kill(struct task_struct *tsk)
+static int task_early_kill(struct task_struct *tsk, int force_early)
 {
        if (!tsk->mm)
                return 0;
+       if (force_early)
+               return 1;
        if (tsk->flags & PF_MCE_PROCESS)
                return !!(tsk->flags & PF_MCE_EARLY);
        return sysctl_memory_failure_early_kill;
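
task_early_kill() grows a force_early override: for an action-required error every task mapping the page must be considered for a kill, regardless of its stated preference. When the override is off, the old policy stands: PF_MCE_PROCESS marks a task that chose a policy explicitly (PF_MCE_EARLY selecting early kill), and everyone else falls back to the vm.memory_failure_early_kill sysctl. The per-process flags are set from userspace with the PR_MCE_KILL prctl; a short sketch:

/*
 * Sketch: opting the calling process into early kill, i.e. setting
 * the PF_MCE_PROCESS | PF_MCE_EARLY flags that task_early_kill()
 * consults when force_early is not set.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Ask for SIGBUS as soon as poison is found in our mappings,
	 * not only once it is actually accessed. */
	if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
		perror("prctl(PR_MCE_KILL)");

	/* Read back the effective per-process policy. */
	printf("early-kill policy: %d\n",
	       prctl(PR_MCE_KILL_GET, 0, 0, 0, 0));
	return 0;
}
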
@@ -395,7 +397,7 @@ static int task_early_kill(struct task_struct *tsk)
  * Collect processes when the error hit an anonymous page.
  */
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
-                             struct to_kill **tkc)
+                             struct to_kill **tkc, int force_early)
 {
        struct vm_area_struct *vma;
        struct task_struct *tsk;
@@ -411,7 +413,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
 
-               if (!task_early_kill(tsk))
+               if (!task_early_kill(tsk, force_early))
                        continue;
                anon_vma_interval_tree_foreach(vmac, &av->rb_root,
                                               pgoff, pgoff) {
@@ -430,7 +432,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
  * Collect processes when the error hit a file mapped page.
  */
 static void collect_procs_file(struct page *page, struct list_head *to_kill,
-                             struct to_kill **tkc)
+                             struct to_kill **tkc, int force_early)
 {
        struct vm_area_struct *vma;
        struct task_struct *tsk;
@@ -441,7 +443,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
-               if (!task_early_kill(tsk))
+               if (!task_early_kill(tsk, force_early))
                        continue;
 
                vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
@@ -467,7 +469,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
  * First preallocate one tokill structure outside the spin locks,
  * so that we can kill at least one process reasonably reliable.
  */
-static void collect_procs(struct page *page, struct list_head *tokill)
+static void collect_procs(struct page *page, struct list_head *tokill,
+                               int force_early)
 {
        struct to_kill *tk;
 
@@ -478,9 +481,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
        if (!tk)
                return;
        if (PageAnon(page))
-               collect_procs_anon(page, tokill, &tk);
+               collect_procs_anon(page, tokill, &tk, force_early);
        else
-               collect_procs_file(page, tokill, &tk);
+               collect_procs_file(page, tokill, &tk, force_early);
        kfree(tk);
 }
 
@@ -965,7 +968,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         * there's nothing that can be done.
         */
        if (kill)
-               collect_procs(ppage, &tokill);
+               collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
 
        ret = try_to_unmap(ppage, ttu);
        if (ret != SWAP_SUCCESS)
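
hwpoison_user_mappings() is where the override is switched on: flags & MF_ACTION_REQUIRED is handed through as force_early, so collect_procs() stops skipping late-kill processes exactly when the error is synchronous (the int parameter receives the raw mask bit, and any nonzero value reads as true). The path can be exercised without faulty hardware via madvise(MADV_HWPOISON), which feeds a page straight into memory_failure(); a test sketch, assuming CAP_SYS_ADMIN and a kernel built with CONFIG_MEMORY_FAILURE:

/*
 * Sketch: software poison injection. The madvise() call runs
 * memory_failure() on the page; the subsequent store then faults
 * on the poisoned PTE and raises SIGBUS (BUS_MCEERR_AR).
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xaa, pagesz);	/* fault the page in first */

	if (madvise(p, pagesz, MADV_HWPOISON)) {
		perror("madvise(MADV_HWPOISON)");
		return 1;
	}

	p[0] = 0;	/* SIGBUS: the page is now poisoned */
	return 0;	/* normally not reached */
}
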
@@ -1114,10 +1117,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * The check (unnecessarily) ignores LRU pages being isolated and
         * walked by the page reclaim code, however that's not a big loss.
         */
-       if (!PageHuge(p) && !PageTransTail(p)) {
-               if (!PageLRU(p))
-                       shake_page(p, 0);
-               if (!PageLRU(p)) {
+       if (!PageHuge(p)) {
+               if (!PageLRU(hpage))
+                       shake_page(hpage, 0);
+               if (!PageLRU(hpage)) {
                        /*
                         * shake_page could have turned it free.
                         */
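
The non-LRU check above is made THP-aware: a transparent-huge-page tail page never sits on the LRU itself, its state lives on the head page, so both the test and the shake_page() retry now operate on hpage rather than p. A rough way to steer memory_failure() at a THP tail page is to poison a base page in the middle of a huge mapping; exact behavior (split versus whole-page poisoning) varies by kernel version, so treat this only as a sketch:

/*
 * Sketch: poisoning one base page inside what is hopefully a THP
 * (assumes 2MiB huge pages, 4KiB base pages and MADV_HUGEPAGE support).
 */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define THP_SIZE	(2UL << 20)

int main(void)
{
	char *p;

	if (posix_memalign((void **)&p, THP_SIZE, THP_SIZE))
		return 1;
	madvise(p, THP_SIZE, MADV_HUGEPAGE);	/* request a THP */
	memset(p, 0, THP_SIZE);			/* fault it in */

	/* Hit a page well past the head: a THP tail page. */
	return madvise(p + 8 * 4096, 4096, MADV_HWPOISON);
}
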
@@ -1153,6 +1156,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+               atomic_long_sub(nr_pages, &num_poisoned_pages);
+               put_page(hpage);
                res = 0;
                goto out;
        }
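
This hunk plugs a leak in the race with unpoison_memory(): by the time memory_failure() gets here it has already bumped num_poisoned_pages and taken a reference on hpage, so if a concurrent unpoison cleared PageHWPoison both must be rolled back before bailing out. The race is reachable from userspace through the hwpoison-inject debugfs files; a sketch, assuming debugfs is mounted at /sys/kernel/debug and the hwpoison_inject module is loaded:

/*
 * Sketch: poison then immediately unpoison the same page frame
 * number, the window the hunk above makes leak-free.
 */
#include <stdio.h>

static int write_pfn(const char *path, unsigned long pfn)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%#lx\n", pfn);
	return fclose(f);
}

int main(int argc, char **argv)
{
	unsigned long pfn;

	if (argc != 2 || sscanf(argv[1], "%lx", &pfn) != 1) {
		fprintf(stderr, "usage: %s <pfn-hex>\n", argv[0]);
		return 1;
	}
	write_pfn("/sys/kernel/debug/hwpoison/corrupt-pfn", pfn);
	write_pfn("/sys/kernel/debug/hwpoison/unpoison-pfn", pfn);
	return 0;
}
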
@@ -1467,7 +1472,9 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
                 * Did it turn free?
                 */
                ret = __get_any_page(page, pfn, 0);
-               if (!PageLRU(page)) {
+               if (ret == 1 && !PageLRU(page)) {
+                       /* Drop page reference which is from __get_any_page() */
+                       put_page(page);
                        pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                                pfn, page->flags);
                        return -EIO;
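
Finally, get_any_page() leaked the reference taken by __get_any_page() when the retry succeeded (ret == 1) but the page still was not on the LRU; the failure path now drops that reference before returning -EIO. This function backs soft offlining, which can be driven per page from sysfs; a sketch, assuming the standard /sys/devices/system/memory layout:

/*
 * Sketch: ask the kernel to soft-offline the page backing a given
 * physical address, reaching soft_offline_page() -> get_any_page().
 * Requires root.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	unsigned long long paddr;
	FILE *f;

	if (argc != 2 || sscanf(argv[1], "%llx", &paddr) != 1) {
		fprintf(stderr, "usage: %s <phys-addr-hex>\n", argv[0]);
		return 1;
	}
	f = fopen("/sys/devices/system/memory/soft_offline_page", "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%#llx\n", paddr);
	return fclose(f) ? 1 : 0;
}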