UPSTREAM: kasan: improve double-free reports
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] mm/page_alloc.c
index b9a84bd518858db2c7ad4c116e646eb013a36da5..04b450ce822d275b99aa3f2e43edf0042d0acf63 100644
@@ -279,8 +279,37 @@ EXPORT_SYMBOL(nr_online_nodes);
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+
+/*
+ * Determine how many pages need to be initialized during early boot
+ * (non-deferred initialization).
+ * The value of first_deferred_pfn will be set later, once non-deferred pages
+ * are initialized, but for now set it to ULONG_MAX.
+ */
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+       phys_addr_t start_addr, end_addr;
+       unsigned long max_pgcnt;
+       unsigned long reserved;
+
+       /*
+        * Initialise at least 2G of a node, but also take into account the
+        * two large system hashes that can take up 1GB for 0.25TB/node.
+        */
+       max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
+                       (pgdat->node_spanned_pages >> 8));
+
+       /*
+        * Compensate for all the memblock reservations (e.g. crash kernel)
+        * in the initial estimation to make sure we will initialize enough
+        * memory to boot.
+        */
+       start_addr = PFN_PHYS(pgdat->node_start_pfn);
+       end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
+       reserved = memblock_reserved_memory_within(start_addr, end_addr);
+       max_pgcnt += PHYS_PFN(reserved);
+
+       pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
        pgdat->first_deferred_pfn = ULONG_MAX;
 }
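
A quick way to sanity-check the sizing above: it is just max(2GiB of pages,
spanned/256) plus whatever memblock already reserved in that window, clamped
to the node span. A standalone userspace sketch with made-up node sizes
(4KiB pages assumed; max_ul()/min_ul() stand in for the kernel's max()/min()):

/* Sketch only: all node/reservation sizes below are hypothetical. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4KiB pages */

static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

int main(void)
{
	/* hypothetical node: 1TiB spanned, 512MiB memblock-reserved in range */
	unsigned long spanned  = (1UL << 40) >> PAGE_SHIFT;
	unsigned long reserved = (512UL << 20) >> PAGE_SHIFT;

	/* at least 2GiB, but also spanned/256 (the 1GiB-per-0.25TiB hashes) */
	unsigned long max_pgcnt = max_ul(2UL << (30 - PAGE_SHIFT), spanned >> 8);

	/* compensate for memblock reservations such as the crash kernel */
	max_pgcnt += reserved;

	printf("static_init_pgcnt = %lu pages (%lu MiB)\n",
	       min_ul(max_pgcnt, spanned),
	       min_ul(max_pgcnt, spanned) >> (20 - PAGE_SHIFT));
	return 0;
}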
 
@@ -314,10 +343,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
-
        /* Initialise at least static_init_pgcnt pages of the highest zone */
        (*nr_initialised)++;
-       if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
+       if ((*nr_initialised > pgdat->static_init_pgcnt) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                pgdat->first_deferred_pfn = pfn;
                return false;
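
The second half of the changed condition is unchanged but worth spelling out:
(pfn & (PAGES_PER_SECTION - 1)) == 0 is the usual power-of-two alignment
test, so deferral can only begin at the first pfn of a mem_section and
first_deferred_pfn always lands on a section boundary. A tiny userspace
sketch (32768 pages per section is a made-up example; the real value is
arch- and config-dependent):

#include <stdio.h>

#define PAGES_PER_SECTION 32768UL	/* hypothetical example value */

int main(void)
{
	unsigned long pfns[] = { 32768, 32769, 65536 };

	for (int i = 0; i < 3; i++)
		printf("pfn %lu: %s\n", pfns[i],
		       (pfns[i] & (PAGES_PER_SECTION - 1)) == 0 ?
		       "section start, may defer here" : "mid-section");
	return 0;
}
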
@@ -553,6 +581,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        INIT_LIST_HEAD(&page->lru);
@@ -570,6 +601,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        set_page_private(page, 0);
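
Both hunks above apply the same defensive pattern: lookup_page_ext() can
return NULL because the page_ext storage is allocated separately from struct
page and that allocation can fail (or not yet cover the page), so callers
must treat NULL as "no extension data". A hedged sketch of the pattern in a
hypothetical caller (my_debug_hook() is not a real kernel function):

static void my_debug_hook(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	/* no page_ext for this page: silently skip the debug bookkeeping */
	if (unlikely(!page_ext))
		return;

	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
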
@@ -1520,14 +1554,14 @@ int move_freepages(struct zone *zone,
 #endif
 
        for (page = start_page; page <= end_page;) {
-               /* Make sure we are not inadvertently changing nodes */
-               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }
 
+               /* Make sure we are not inadvertently changing nodes */
+               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
                if (!PageBuddy(page)) {
                        page++;
                        continue;
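
The reorder above is deliberate: page_to_nid() reads page->flags, which is
only trustworthy once pfn_valid_within() has confirmed that the pfn is backed
by a real, initialized struct page. On configurations that allow holes inside
a zone, running the VM_BUG_ON_PAGE() first could read garbage from the
struct page of a hole pfn, so the validity check must come first.
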
@@ -1741,13 +1775,25 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
                                                struct page, lru);
 
                        /*
-                        * It should never happen but changes to locking could
-                        * inadvertently allow a per-cpu drain to add pages
-                        * to MIGRATE_HIGHATOMIC while unreserving so be safe
-                        * and watch for underflows.
+                        * In the page freeing path the migratetype change is
+                        * racy, so we can encounter several free pages from
+                        * one pageblock in this loop even though we already
+                        * changed the pageblock type from highatomic to
+                        * ac->migratetype. So adjust the count only once.
                         */
-                       zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
-                               zone->nr_reserved_highatomic);
+                       if (get_pageblock_migratetype(page) ==
+                                                       MIGRATE_HIGHATOMIC) {
+                               /*
+                                * It should never happen but changes to
+                                * locking could inadvertently allow a per-cpu
+                                * drain to add pages to MIGRATE_HIGHATOMIC
+                                * while unreserving so be safe and watch for
+                                * underflows.
+                                */
+                               zone->nr_reserved_highatomic -= min(
+                                               pageblock_nr_pages,
+                                               zone->nr_reserved_highatomic);
+                       }
 
                        /*
                         * Convert to ac->migratetype and avoid the normal
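
The effect of the new migratetype check is to make the accounting idempotent
per pageblock: only the first free page seen while the block is still
MIGRATE_HIGHATOMIC decrements the reservation; once the block's type is
changed below, later free pages from the same block skip the adjustment. A
standalone userspace sketch with made-up values:

#include <stdio.h>

#define MIGRATE_MOVABLE    0	/* example values, not the kernel's */
#define MIGRATE_HIGHATOMIC 1
#define PAGEBLOCK_PAGES    512UL

int main(void)
{
	unsigned long nr_reserved = PAGEBLOCK_PAGES;	/* one block reserved */
	int block_mt = MIGRATE_HIGHATOMIC;

	/* three free pages from the same pageblock observed by the loop */
	for (int i = 0; i < 3; i++) {
		if (block_mt == MIGRATE_HIGHATOMIC) {
			/* min() guards against underflow, as in the hunk */
			nr_reserved -= PAGEBLOCK_PAGES < nr_reserved ?
				       PAGEBLOCK_PAGES : nr_reserved;
			block_mt = MIGRATE_MOVABLE;	/* like set_pageblock_migratetype() */
		}
	}
	printf("nr_reserved_highatomic = %lu (decremented once, no underflow)\n",
	       nr_reserved);
	return 0;
}
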
@@ -3951,8 +3997,7 @@ static int __parse_numa_zonelist_order(char *s)
                user_zonelist_order = ZONELIST_ORDER_ZONE;
        } else {
                printk(KERN_WARNING
-                       "Ignoring invalid numa_zonelist_order value:  "
-                       "%s\n", s);
+                      "Ignoring invalid numa_zonelist_order value:  %s\n", s);
                return -EINVAL;
        }
        return 0;
@@ -4417,12 +4462,11 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
        else
                page_group_by_mobility_disabled = 0;
 
-       pr_info("Built %i zonelists in %s order, mobility grouping %s.  "
-               "Total pages: %ld\n",
-                       nr_online_nodes,
-                       zonelist_order_name[current_zonelist_order],
-                       page_group_by_mobility_disabled ? "off" : "on",
-                       vm_total_pages);
+       pr_info("Built %i zonelists in %s order, mobility grouping %s.  Total pages: %ld\n",
+               nr_online_nodes,
+               zonelist_order_name[current_zonelist_order],
+               page_group_by_mobility_disabled ? "off" : "on",
+               vm_total_pages);
 #ifdef CONFIG_NUMA
        pr_info("Policy zone: %s\n", zone_names[policy_zone]);
 #endif
@@ -5355,7 +5399,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        /* pg_data_t should be reset to zero when it's allocated */
        WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
 
-       reset_deferred_meminit(pgdat);
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -5374,6 +5417,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
                (unsigned long)pgdat->node_mem_map);
 #endif
 
+       reset_deferred_meminit(pgdat);
        free_area_init_core(pgdat);
 }
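
The move is an ordering fix: reset_deferred_meminit() now reads
pgdat->node_start_pfn and pgdat->node_spanned_pages to size the non-deferred
region, so it must run after those fields are populated earlier in
free_area_init_node(); at its old call site it would have computed
static_init_pgcnt from zeroed fields.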
 
@@ -5840,8 +5884,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
        }
 
        if (pages && s)
-               pr_info("Freeing %s memory: %ldK (%p - %p)\n",
-                       s, pages << (PAGE_SHIFT - 10), start, end);
+               pr_info("Freeing %s memory: %ldK\n",
+                       s, pages << (PAGE_SHIFT - 10));
 
        return pages;
 }
@@ -5893,22 +5937,21 @@ void __init mem_init_print_info(const char *str)
 
 #undef adj_init_size
 
-       pr_info("Memory: %luK/%luK available "
-              "(%luK kernel code, %luK rwdata, %luK rodata, "
-              "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
+       pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
 #ifdef CONFIG_HIGHMEM
-              ", %luK highmem"
+               ", %luK highmem"
 #endif
-              "%s%s)\n",
-              nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
-              codesize >> 10, datasize >> 10, rosize >> 10,
-              (init_data_size + init_code_size) >> 10, bss_size >> 10,
-              (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
-              totalcma_pages << (PAGE_SHIFT-10),
+               "%s%s)\n",
+               nr_free_pages() << (PAGE_SHIFT - 10),
+               physpages << (PAGE_SHIFT - 10),
+               codesize >> 10, datasize >> 10, rosize >> 10,
+               (init_data_size + init_code_size) >> 10, bss_size >> 10,
+               (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
+               totalcma_pages << (PAGE_SHIFT - 10),
 #ifdef CONFIG_HIGHMEM
-              totalhigh_pages << (PAGE_SHIFT-10),
+               totalhigh_pages << (PAGE_SHIFT - 10),
 #endif
-              str ? ", " : "", str ? str : "");
+               str ? ", " : "", str ? str : "");
 }
 
 /**
@@ -6803,7 +6846,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
-               pr_info("%s: [%lx, %lx) PFNs busy\n",
+               pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
                        __func__, outer_start, end);
                ret = -EBUSY;
                goto done;
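
Switching to pr_info_ratelimited() means that once the printk ratelimit
burst is exhausted, further "PFNs busy" lines within the interval are
dropped rather than flooding the log; callers such as CMA retry
alloc_contig_range() and can hit busy ranges in quick succession. Minimal
usage sketch (report_busy() is a hypothetical helper):

#include <linux/printk.h>

/* Logs at most a burst of messages per ratelimit window; excess calls
 * within the window are suppressed. */
static void report_busy(unsigned long start, unsigned long end)
{
	pr_info_ratelimited("range [%lx, %lx) PFNs busy\n", start, end);
}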