remove libdss from Makefile
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / mm / memory_hotplug.c
index d4b5f29906b96465207df76897739d2eba518886..c9d3a49bd4e2090ac758bc04e5f4bfffaf1ca480 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/compaction.h>
+#include <linux/rmap.h>
 
 #include <asm/tlbflush.h>
 
@@ -1255,7 +1256,8 @@ static struct page *next_active_pageblock(struct page *page)
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
        struct page *page = pfn_to_page(start_pfn);
-       struct page *end_page = page + nr_pages;
+       unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
+       struct page *end_page = pfn_to_page(end_pfn);
 
        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
@@ -1295,6 +1297,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
                                i++;
                        if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
                                continue;
+                       /* Check if we got outside of the zone */
+                       if (zone && !zone_spans_pfn(zone, pfn + i))
+                               return 0;
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
                                return 0;
@@ -1391,6 +1396,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                        pfn = page_to_pfn(compound_head(page))
                                + hpage_nr_pages(page) - 1;
 
+               /*
+                * HWPoison pages have elevated reference counts, so migration would
+                * fail on them. It also doesn't make any sense to migrate them in the
+                * first place. Still try to unmap such a page in case it is still mapped
+                * (e.g. the current hwpoison implementation doesn't unmap KSM pages, so
+                * keep this unmap as the catch-all safety net).
+                */
+               if (PageHWPoison(page)) {
+                       if (WARN_ON(PageLRU(page)))
+                               isolate_lru_page(page);
+                       if (page_mapped(page))
+                               try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
+                       continue;
+               }
+
                if (!get_page_unless_zero(page))
                        continue;
                /*