mm: enable page poisoning early at boot
authorVinayak Menon <vinmenon@codeaurora.org>
Wed, 3 May 2017 21:54:42 +0000 (14:54 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 3 May 2017 22:52:10 +0000 (15:52 -0700)
On SPARSEMEM systems page poisoning is enabled after buddy is up,
because of the dependency on page extension init.  This causes the pages
released by free_all_bootmem not to be poisoned.  This either delays or
misses the identification of some issues because the pages have to
undergo another cycle of alloc-free-alloc for any corruption to be
detected.

Enable page poisoning early by getting rid of the PAGE_EXT_DEBUG_POISON
flag.  Since all the free pages will now be poisoned, the flag need not
be verified before checking the poison during an alloc.

[vinmenon@codeaurora.org: fix Kconfig]
Link: http://lkml.kernel.org/r/1490878002-14423-1-git-send-email-vinmenon@codeaurora.org
Link: http://lkml.kernel.org/r/1490358246-11001-1-git-send-email-vinmenon@codeaurora.org
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Acked-by: Laura Abbott <labbott@redhat.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/Kconfig.debug
mm/page_alloc.c
mm/page_ext.c
mm/page_poison.c

index 695da2a19b4cbb355810b88df2095b6c3e242801..5d22e69f51ea6ff5f7d3fffef5239fd84bf60d6c 100644 (file)
@@ -2487,7 +2487,6 @@ extern long copy_huge_page_from_user(struct page *dst_page,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern unsigned int _debug_guardpage_minorder;
index 79d0fd13b5b3c1a826f472398fd60a1ae1cb5da6..5b0adf1435de7dba61e7d0b34c6a7fd912041fd1 100644 (file)
@@ -42,7 +42,6 @@ config DEBUG_PAGEALLOC_ENABLE_DEFAULT
 
 config PAGE_POISONING
        bool "Poison pages after freeing"
-       select PAGE_EXTENSION
        select PAGE_POISONING_NO_SANITY if HIBERNATION
        ---help---
          Fill the pages with poison patterns after free_pages() and verify
index 465391811c2eca8f464b771611a979d498064b9a..f1f225608413fc6a144be4c0fb07cdd55e350fb4 100644 (file)
@@ -1689,10 +1689,10 @@ static inline int check_new_page(struct page *page)
        return 1;
 }
 
-static inline bool free_pages_prezeroed(bool poisoned)
+static inline bool free_pages_prezeroed(void)
 {
        return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
-               page_poisoning_enabled() && poisoned;
+               page_poisoning_enabled();
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -1746,17 +1746,10 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
                                                        unsigned int alloc_flags)
 {
        int i;
-       bool poisoned = true;
-
-       for (i = 0; i < (1 << order); i++) {
-               struct page *p = page + i;
-               if (poisoned)
-                       poisoned &= page_is_poisoned(p);
-       }
 
        post_alloc_hook(page, order, gfp_flags);
 
-       if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
+       if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
                for (i = 0; i < (1 << order); i++)
                        clear_highpage(page + i);
 
index 121dcffc4ec1768a6fc71dda8af878aa0d16cb92..88ccc044b09a41504fb212afdb5ca8a45e842998 100644 (file)
@@ -59,9 +59,6 @@
 
 static struct page_ext_operations *page_ext_ops[] = {
        &debug_guardpage_ops,
-#ifdef CONFIG_PAGE_POISONING
-       &page_poisoning_ops,
-#endif
 #ifdef CONFIG_PAGE_OWNER
        &page_owner_ops,
 #endif
@@ -127,15 +124,12 @@ struct page_ext *lookup_page_ext(struct page *page)
        struct page_ext *base;
 
        base = NODE_DATA(page_to_nid(page))->node_page_ext;
-#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
+#if defined(CONFIG_DEBUG_VM)
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_ext arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
-        *
-        * This check is also necessary for ensuring page poisoning
-        * works as expected when enabled
         */
        if (unlikely(!base))
                return NULL;
@@ -204,15 +198,12 @@ struct page_ext *lookup_page_ext(struct page *page)
 {
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);
-#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
+#if defined(CONFIG_DEBUG_VM)
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_ext arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
-        *
-        * This check is also necessary for ensuring page poisoning
-        * works as expected when enabled
         */
        if (!section->page_ext)
                return NULL;
index 2e647c65916b91b00177837370e210d71c568f5e..be19e989ccff51f667c9698c9cb8fea3d5303f8e 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/poison.h>
 #include <linux/ratelimit.h>
 
-static bool __page_poisoning_enabled __read_mostly;
 static bool want_page_poisoning __read_mostly;
 
 static int early_page_poison_param(char *buf)
@@ -18,75 +17,22 @@ static int early_page_poison_param(char *buf)
 early_param("page_poison", early_page_poison_param);
 
 bool page_poisoning_enabled(void)
-{
-       return __page_poisoning_enabled;
-}
-
-static bool need_page_poisoning(void)
-{
-       return want_page_poisoning;
-}
-
-static void init_page_poisoning(void)
 {
        /*
-        * page poisoning is debug page alloc for some arches. If either
-        * of those options are enabled, enable poisoning
+        * Assumes that debug_pagealloc_enabled is set before
+        * free_all_bootmem.
+        * Page poisoning is debug page alloc for some arches. If
+        * either of those options are enabled, enable poisoning.
         */
-       if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
-               if (!want_page_poisoning && !debug_pagealloc_enabled())
-                       return;
-       } else {
-               if (!want_page_poisoning)
-                       return;
-       }
-
-       __page_poisoning_enabled = true;
-}
-
-struct page_ext_operations page_poisoning_ops = {
-       .need = need_page_poisoning,
-       .init = init_page_poisoning,
-};
-
-static inline void set_page_poison(struct page *page)
-{
-       struct page_ext *page_ext;
-
-       page_ext = lookup_page_ext(page);
-       if (unlikely(!page_ext))
-               return;
-
-       __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static inline void clear_page_poison(struct page *page)
-{
-       struct page_ext *page_ext;
-
-       page_ext = lookup_page_ext(page);
-       if (unlikely(!page_ext))
-               return;
-
-       __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-bool page_is_poisoned(struct page *page)
-{
-       struct page_ext *page_ext;
-
-       page_ext = lookup_page_ext(page);
-       if (unlikely(!page_ext))
-               return false;
-
-       return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+       return (want_page_poisoning ||
+               (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+               debug_pagealloc_enabled()));
 }
 
 static void poison_page(struct page *page)
 {
        void *addr = kmap_atomic(page);
 
-       set_page_poison(page);
        memset(addr, PAGE_POISON, PAGE_SIZE);
        kunmap_atomic(addr);
 }
@@ -140,12 +86,13 @@ static void unpoison_page(struct page *page)
 {
        void *addr;
 
-       if (!page_is_poisoned(page))
-               return;
-
        addr = kmap_atomic(page);
+       /*
+        * Page poisoning when enabled poisons each and every page
+        * that is freed to buddy. Thus no extra check is done to
+        * see if a page was poisoned.
+        */
        check_poison_mem(addr, PAGE_SIZE);
-       clear_page_poison(page);
        kunmap_atomic(addr);
 }