From cc715d99e529d470dde2f33a6614f255adea71f3 Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Wed, 21 Mar 2012 16:34:00 -0700
Subject: [PATCH] mm: vmscan: forcibly scan highmem if there are too many
 buffer_heads pinning highmem

Stuart Foster reported on bugzilla that copying large amounts of data
from NTFS caused an OOM kill on 32-bit x86 with 16G of memory.  Andrew
Morton correctly identified that the problem was that NTFS was using
512-byte blocks, meaning each page had 8 buffer_heads in low memory
pinning it.

In the past, direct reclaim used to scan highmem even if the allocating
process did not specify __GFP_HIGHMEM, but it no longer does.  Similarly,
kswapd will no longer reclaim from zones that are above the high
watermark.  The intention in both cases was to minimise unnecessary
reclaim.  The downside is that, on machines with large amounts of
highmem, lowmem can be fully consumed by buffer_heads with nothing
trying to free them.

The following patch is based on a suggestion by Andrew Morton to extend
the buffer_heads_over_limit case to force kswapd and direct reclaim to
scan the highmem zone regardless of the allocation request or
watermarks.

Addresses https://bugzilla.kernel.org/show_bug.cgi?id=42578

[hughd@google.com: move buffer_heads_over_limit check up]
[akpm@linux-foundation.org: buffer_heads_over_limit is unlikely]
Reported-by: Stuart Foster
Tested-by: Stuart Foster
Signed-off-by: Mel Gorman
Signed-off-by: Hugh Dickins
Cc: Johannes Weiner
Cc: Rik van Riel
Cc: Christoph Lameter
Cc: stable
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 42 +++++++++++++++++++++++++++++-------------
 1 file changed, 29 insertions(+), 13 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 87e4d6a6dc1..ae3bf0a09cd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1642,18 +1642,6 @@ static void move_active_pages_to_lru(struct zone *zone,
 	unsigned long pgmoved = 0;
 	struct page *page;
 
-	if (buffer_heads_over_limit) {
-		spin_unlock_irq(&zone->lru_lock);
-		list_for_each_entry(page, list, lru) {
-			if (page_has_private(page) && trylock_page(page)) {
-				if (page_has_private(page))
-					try_to_release_page(page, 0);
-				unlock_page(page);
-			}
-		}
-		spin_lock_irq(&zone->lru_lock);
-	}
-
 	while (!list_empty(list)) {
 		struct lruvec *lruvec;
 
@@ -1735,6 +1723,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
 			continue;
 		}
 
+		if (unlikely(buffer_heads_over_limit)) {
+			if (page_has_private(page) && trylock_page(page)) {
+				if (page_has_private(page))
+					try_to_release_page(page, 0);
+				unlock_page(page);
+			}
+		}
+
 		if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
 			nr_rotated += hpage_nr_pages(page);
 			/*
@@ -2238,6 +2234,14 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
 	unsigned long nr_soft_scanned;
 	bool aborted_reclaim = false;
 
+	/*
+	 * If the number of buffer_heads in the machine exceeds the maximum
+	 * allowed level, force direct reclaim to scan the highmem zone as
+	 * highmem pages could be pinning lowmem pages storing buffer_heads
+	 */
+	if (buffer_heads_over_limit)
+		sc->gfp_mask |= __GFP_HIGHMEM;
+
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask), sc->nodemask) {
 		if (!populated_zone(zone))
@@ -2727,6 +2731,17 @@ loop_again:
 			 */
 			age_active_anon(zone, &sc, priority);
 
+			/*
+			 * If the number of buffer_heads in the machine
+			 * exceeds the maximum allowed level and this node
+			 * has a highmem zone, force kswapd to reclaim from
+			 * it to relieve lowmem pressure.
+			 */
+			if (buffer_heads_over_limit && is_highmem_idx(i)) {
+				end_zone = i;
+				break;
+			}
+
 			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), 0, 0)) {
 				end_zone = i;
@@ -2802,7 +2817,8 @@ loop_again:
 						COMPACT_SKIPPED)
 				testorder = 0;
 
-			if (!zone_watermark_ok_safe(zone, testorder,
+			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
+			    !zone_watermark_ok_safe(zone, testorder,
 					high_wmark_pages(zone) + balance_gap,
 					end_zone, 0)) {
 				shrink_zone(priority, zone, &sc);
-- 
2.20.1
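
---

Editor's note on the failure mode, for readers outside mm/: the arithmetic
behind the OOM is straightforward.  With a 512-byte block size, each
4096-byte page cache page carries PAGE_SIZE / block_size = 8 buffer_heads,
and buffer_heads are always allocated from lowmem regardless of where the
page itself lives.  The sketch below (plain userspace C, not kernel code;
the 56-byte buffer_head size is an assumption for 32-bit x86, as the real
sizeof(struct buffer_head) depends on configuration) estimates how much
lowmem is pinned by ~15G of highmem page cache:

#include <stdio.h>

int main(void)
{
	/* Assumptions, not values read from a live kernel. */
	const unsigned long long page_size  = 4096;        /* bytes */
	const unsigned long long block_size = 512;         /* NTFS block size from the report */
	const unsigned long long bh_size    = 56;          /* assumed sizeof(struct buffer_head) */
	const unsigned long long cached     = 15ULL << 30; /* ~15G of highmem page cache */

	unsigned long long bh_per_page = page_size / block_size;        /* 8 */
	unsigned long long pages       = cached / page_size;
	unsigned long long pinned      = pages * bh_per_page * bh_size; /* lowmem consumed */

	printf("buffer_heads per page: %llu\n", bh_per_page);
	printf("lowmem pinned by buffer_heads: %llu MiB\n", pinned >> 20);
	return 0;
}

Under these assumptions this works out to roughly 1.6G of buffer_heads,
comfortably exceeding the ~880M of lowmem available to a 32-bit kernel.
That is why only reclaiming the highmem pages themselves (freeing their
buffer_heads along with them), as this patch forces kswapd and direct
reclaim to do, relieves the lowmem pressure.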