mm: fix wrong kasan report [1/1]
author Tao Zeng <tao.zeng@amlogic.com>
Wed, 28 Aug 2019 07:25:40 +0000 (15:25 +0800)
committer Jianxin Pan <jianxin.pan@amlogic.com>
Thu, 29 Aug 2019 06:36:43 +0000 (23:36 -0700)
PD#SWPL-13281

Problem:
There are two types of false KASAN reports after merging the
change that saves wasted slab memory:
1. slab-out-of-bounds, caused by krealloc() setting shadow
   memory out of range, since the tail of the page had already
   been freed.
2. use-after-free, caused by kasan_free_pages() being called
   after a page was freed. This function is already called from
   __free_pages(), so the shadow memory was poisoned twice.
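
A minimal userspace model of the second report (the shadow array,
values, and helper names here are illustrative, not the kernel's
KASAN internals): once the freed page has been re-allocated, a
second, stale poisoning of its shadow makes the new owner's valid
accesses look like use-after-free.

#include <stdio.h>

#define NPAGES          4
#define SHADOW_FREE     0xFF    /* shadow byte for a freed page */
#define SHADOW_OK       0x00    /* shadow byte for a live page */

static unsigned char shadow[NPAGES];

static void mark_freed(int page)   { shadow[page] = SHADOW_FREE; }
static void mark_alloced(int page) { shadow[page] = SHADOW_OK; }

int main(void)
{
        int page = 0;

        mark_freed(page);       /* __free_pages() poisons the shadow */
        mark_alloced(page);     /* page re-allocated by a new owner */
        mark_freed(page);       /* extra kasan_free_pages(): poisons
                                 * shadow the new owner relies on */

        if (shadow[page] == SHADOW_FREE)
                printf("false use-after-free on page %d\n", page);
        return 0;
}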

Solution:
1. keep the shadow range in bounds when the tail of a page was
   freed and the allocation is krealloc'ed again (see the worked
   example after this list);
2. remove the redundant call of kasan_free_pages().
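
A sketch of fix 1 with concrete numbers, mirroring the
mm/kasan/kasan.c hunk below; the address, size, and order are
illustrative, and PAGE_ALIGN is the usual kernel macro redefined
for a standalone userspace build:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long ptr = 0x10000;    /* hypothetical allocation start */
        unsigned long size = 5000;      /* krealloc'ed size: fits in 2 pages */
        unsigned int order = 2;         /* compound page spans 4 pages */

        /*
         * Old: the redzone runs to the end of the compound page,
         * including tail pages that were already freed, so shadow
         * memory is written out of range.
         */
        unsigned long end_old = ptr + (PAGE_SIZE << order);

        /*
         * New: when PageOwnerPriv1() says the tail was freed, stop
         * at the page-aligned end of the live allocation instead.
         */
        unsigned long end_new = ptr + PAGE_ALIGN(size);

        printf("old redzone_end: 0x%lx, new redzone_end: 0x%lx\n",
               end_old, end_new);
        return 0;
}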

Verify:
X301

Change-Id: Ib5bdcbb618a783920009bb97d112c361888b0d7c
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
drivers/amlogic/memory_ext/Kconfig
mm/kasan/kasan.c
mm/page_alloc.c
mm/slub.c

index 8bb640ec82e6e7afb363d1daf1a7d1fa5c64b1ca..0da0422666135febb19eb6d7be8fa1a6555b0245 100644
--- a/drivers/amlogic/memory_ext/Kconfig
+++ b/drivers/amlogic/memory_ext/Kconfig
@@ -55,7 +55,6 @@ config AMLOGIC_KASAN32
 config AMLOGIC_VMAP
        bool "Amlogic kernel stack"
        depends on AMLOGIC_MEMORY_EXTEND
-       depends on !KASAN
        default y
        help
                This config is used to enable amlogic kernel stack
index 35408ecdae38ba4ab21df5db325239422ea7ccb5..55cd68247c50657240323ff466c10303ec7749d8 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -641,6 +641,11 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+       if (PageOwnerPriv1(page)) { /* end of this page was freed */
+               redzone_end = (unsigned long)ptr + PAGE_ALIGN(size);
+       }
+#endif
 
        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
index b4539048056bb89fd1b51b3131db45695492dbf4..9c45a556630c08bacb52a8731f842c0c4663f2d1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -931,7 +931,17 @@ done_merging:
                }
        }
 
+#if defined(CONFIG_AMLOGIC_MEMORY_EXTEND) && defined(CONFIG_KASAN)
+       /*
+        * always put freed page to tail of buddy system, in
+        * order to increase probability of use-after-free
+        * for KASAN check.
+        */
+       list_add_tail(&page->lru,
+                     &zone->free_area[order].free_list[migratetype]);
+#else
        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
+#endif
 out:
        zone->free_area[order].nr_free++;
 #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
@@ -2622,10 +2632,19 @@ void free_hot_cold_page(struct page *page, bool cold)
        }
 
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
+#if defined(CONFIG_AMLOGIC_MEMORY_EXTEND) && defined(CONFIG_KASAN)
+       /*
+        * always put freed page to tail of buddy system, in
+        * order to increase probability of use-after-free
+        * for KASAN check.
+        */
+       list_add_tail(&page->lru, &pcp->lists[migratetype]);
+#else
        if (!cold)
                list_add(&page->lru, &pcp->lists[migratetype]);
        else
                list_add_tail(&page->lru, &pcp->lists[migratetype]);
+#endif
        pcp->count++;
        if (pcp->count >= pcp->high) {
                unsigned long batch = READ_ONCE(pcp->batch);
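
Both list_add_tail() hunks above turn the free lists FIFO for
freed pages, so a just-freed page is reused as late as possible
and its poisoned shadow survives longer, giving a stale pointer a
better chance of hitting still-poisoned memory. A userspace sketch
of the effect (the array-based free list is illustrative, not the
kernel's free_list):

#include <stdio.h>

#define NPAGES 4

/* illustrative free list: allocation always takes from index 0 */
static int freelist[NPAGES];
static int count;

static void free_head(int page)         /* old: list_add() */
{
        for (int i = count; i > 0; i--)
                freelist[i] = freelist[i - 1];
        freelist[0] = page;
        count++;
}

static void free_tail(int page)         /* new: list_add_tail() */
{
        freelist[count++] = page;
}

int main(void)
{
        /*
         * LIFO: page 7, freed last, is re-allocated first, so a
         * dangling pointer to it quickly looks "valid" again and
         * KASAN misses the use-after-free.
         */
        free_head(5); free_head(6); free_head(7);
        printf("LIFO reuses page %d first\n", freelist[0]);

        /*
         * FIFO: page 7 sits at the tail, its shadow stays
         * poisoned longer and a stale access is caught.
         */
        count = 0;
        free_tail(5); free_tail(6); free_tail(7);
        printf("FIFO reuses page %d first\n", freelist[0]);
        return 0;
}
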
index 59ec5272386084693b65a03b0d84a287b0d41ef0..7dfc2a4285fa9155e2e3f65299392644a10f7dc5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3734,7 +3734,6 @@ static void aml_slub_free_large(struct page *page, const void *obj)
                        __func__, page_address(page), nr_pages, obj);
                for (i = 0; i < nr_pages; i++)  {
                        __free_pages(page, 0);
-                       kasan_free_pages(page, 0);
                        page++;
                }
        }