From 29b52de182acf50f85a8284ad39104d84c9bbf57 Mon Sep 17 00:00:00 2001
From: "seokhoon.yoon"
Date: Fri, 20 May 2016 16:58:47 -0700
Subject: [PATCH] mm, kasan: fix to call kasan_free_pages() after poisoning
 page

When CONFIG_PAGE_POISONING and CONFIG_KASAN are enabled,
free_pages_prepare()'s code flow is as follows:

1) kmemcheck_free_shadow()
2) kasan_free_pages()
   - marks the page's shadow bytes as freed
3) kernel_poison_pages()
   3.1) KASAN checks whether the write that poisons the page is valid
        ---> an error is reported, because KASAN considers the access
             invalid (the shadow already says the page is freed)
   3.2) the page is poisoned
4) kernel_map_pages()

So kasan_free_pages() should be called after poisoning the page.

Link: http://lkml.kernel.org/r/1463220405-7455-1-git-send-email-iamyooon@gmail.com
Signed-off-by: seokhoon.yoon
Cc: Andrey Ryabinin
Cc: Laura Abbott
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2dd1ba4e70cc..383b14b4f61d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -993,7 +993,6 @@ static __always_inline bool free_pages_prepare(struct page *page,
 
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
-	kasan_free_pages(page, order);
 
 	/*
 	 * Check tail pages before head page information is cleared to
@@ -1035,6 +1034,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	arch_free_page(page, order);
 	kernel_poison_pages(page, 1 << order, 0);
 	kernel_map_pages(page, 1 << order, 0);
+	kasan_free_pages(page, order);
 
 	return true;
 }
--
2.20.1
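
Note: the program below is a user-space toy model of the ordering bug, not
kernel code. The names shadow, toy_kasan_free_pages(), toy_check_access()
and toy_kernel_poison_pages() are invented stand-ins for the real shadow
memory, kasan_free_pages(), KASAN's per-access check and
kernel_poison_pages(). It only illustrates why the old call order produces
a false positive: once the shadow says the page is freed, the write that
poisons the page is reported as invalid, while poisoning first and marking
the shadow afterwards reports nothing.

/*
 * Toy model of the kasan_free_pages()/kernel_poison_pages() ordering bug.
 * Illustration only; every name here is made up for this sketch.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE    16         /* tiny "page" for the model */
#define SHADOW_OK     0         /* page may be accessed */
#define SHADOW_FREED  1         /* page marked freed by "KASAN" */

static unsigned char page[PAGE_SIZE];
static unsigned char shadow = SHADOW_OK;   /* one shadow byte per "page" */

/* Stand-in for kasan_free_pages(): mark the shadow as freed. */
static void toy_kasan_free_pages(void)
{
	shadow = SHADOW_FREED;
}

/* Stand-in for the KASAN check performed on every access. */
static void toy_check_access(const char *what)
{
	if (shadow == SHADOW_FREED)
		printf("KASAN report: invalid %s to freed page\n", what);
}

/* Stand-in for kernel_poison_pages(): write the poison pattern. */
static void toy_kernel_poison_pages(void)
{
	toy_check_access("write");      /* the poisoning write is checked */
	memset(page, 0xaa, PAGE_SIZE);  /* PAGE_POISON-style 0xaa pattern */
}

int main(void)
{
	/* Old order: shadow marked freed first, poisoning then trips it. */
	toy_kasan_free_pages();
	toy_kernel_poison_pages();      /* prints a false KASAN report */

	/* New order: poison first, then hand the page over to "KASAN". */
	shadow = SHADOW_OK;             /* reset the model */
	toy_kernel_poison_pages();      /* no report */
	toy_kasan_free_pages();
	return 0;
}

Compiling and running this prints one report for the old order and none
for the new order, mirroring the effect of moving kasan_free_pages() after
kernel_poison_pages() in the patch above.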