From: Lai Jiangshan
Date: Wed, 12 Dec 2012 00:03:20 +0000 (-0800)
Subject: memory_hotplug: handle empty zone when online_movable/online_kernel
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e455a9b92d6e19a3f0f7eb6f6241efa566a7e81a;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

memory_hotplug: handle empty zone when online_movable/online_kernel

Make online_movable/online_kernel able to empty a zone, or to move memory
into an empty zone.

Signed-off-by: Lai Jiangshan
Signed-off-by: Wen Congyang
Cc: Yasuaki Ishimatsu
Cc: Lai Jiangshan
Cc: Jiang Liu
Cc: KOSAKI Motohiro
Cc: Minchan Kim
Cc: Mel Gorman
Cc: David Rientjes
Cc: Yinghai Lu
Cc: Rusty Russell
Cc: Greg KH
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 5c1f4959e6b4..c370491bdb97 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -219,8 +219,17 @@ static void resize_zone(struct zone *zone, unsigned long start_pfn,
 {
 	zone_span_writelock(zone);
 
-	zone->zone_start_pfn = start_pfn;
-	zone->spanned_pages = end_pfn - start_pfn;
+	if (end_pfn - start_pfn) {
+		zone->zone_start_pfn = start_pfn;
+		zone->spanned_pages = end_pfn - start_pfn;
+	} else {
+		/*
+		 * keep it consistent with free_area_init_core():
+		 * if spanned_pages == 0, then keep start_pfn == 0
+		 */
+		zone->zone_start_pfn = 0;
+		zone->spanned_pages = 0;
+	}
 
 	zone_span_writeunlock(zone);
 }
@@ -236,10 +245,19 @@ static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
 		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
 }
 
-static int move_pfn_range_left(struct zone *z1, struct zone *z2,
+static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
+	int ret;
 	unsigned long flags;
+	unsigned long z1_start_pfn;
+
+	if (!z1->wait_table) {
+		ret = init_currently_empty_zone(z1, start_pfn,
+			end_pfn - start_pfn, MEMMAP_HOTPLUG);
+		if (ret)
+			return ret;
+	}
 
 	pgdat_resize_lock(z1->zone_pgdat, &flags);
 
@@ -253,7 +271,13 @@ static int move_pfn_range_left(struct zone *z1, struct zone *z2,
 	if (end_pfn <= z2->zone_start_pfn)
 		goto out_fail;
 
-	resize_zone(z1, z1->zone_start_pfn, end_pfn);
+	/* use start_pfn for z1's start_pfn if z1 is empty */
+	if (z1->spanned_pages)
+		z1_start_pfn = z1->zone_start_pfn;
+	else
+		z1_start_pfn = start_pfn;
+
+	resize_zone(z1, z1_start_pfn, end_pfn);
 	resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);
 
 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
@@ -266,10 +290,19 @@ out_fail:
 	return -1;
 }
 
-static int move_pfn_range_right(struct zone *z1, struct zone *z2,
+static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
+	int ret;
 	unsigned long flags;
+	unsigned long z2_end_pfn;
+
+	if (!z2->wait_table) {
+		ret = init_currently_empty_zone(z2, start_pfn,
+			end_pfn - start_pfn, MEMMAP_HOTPLUG);
+		if (ret)
+			return ret;
+	}
 
 	pgdat_resize_lock(z1->zone_pgdat, &flags);
 
@@ -283,8 +316,14 @@ static int move_pfn_range_right(struct zone *z1, struct zone *z2,
 	if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
 		goto out_fail;
 
+	/* use end_pfn for z2's end_pfn if z2 is empty */
+	if (z2->spanned_pages)
+		z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
+	else
+		z2_end_pfn = end_pfn;
+
 	resize_zone(z1, z1->zone_start_pfn, start_pfn);
-	resize_zone(z2, start_pfn, z2->zone_start_pfn + z2->spanned_pages);
+	resize_zone(z2, start_pfn, z2_end_pfn);
 
 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
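
Not part of the patch: below is a minimal user-space sketch of the empty-zone
behaviour that resize_zone() gains above, useful for seeing the invariant in
isolation.  The struct zone here is a simplified stand-in for the kernel
structure (only the two fields the patch touches), and resize_zone_model() is
a hypothetical name for illustration; the point it models is that a
zero-length pfn range resets the zone to start_pfn = 0, spanned_pages = 0,
in line with what free_area_init_core() does for empty zones.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct zone: only the two fields
 * that resize_zone() touches are modelled here. */
struct zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

/* Models the patched resize_zone(): an empty pfn range resets the zone to
 * (start_pfn = 0, spanned_pages = 0); a non-empty range spans it as usual. */
static void resize_zone_model(struct zone *zone,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	if (end_pfn - start_pfn) {
		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	} else {
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
	}
}

int main(void)
{
	struct zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x800 };

	/* moving every pfn out of z (e.g. via online_movable) empties it */
	resize_zone_model(&z, 0x1000, 0x1000);
	printf("empty:  start_pfn=%lu spanned=%lu\n",
	       z.zone_start_pfn, z.spanned_pages);	/* 0 0 */

	/* moving memory into the now-empty zone re-spans it from scratch */
	resize_zone_model(&z, 0x2000, 0x2800);
	printf("refill: start_pfn=%#lx spanned=%#lx\n",
	       z.zone_start_pfn, z.spanned_pages);	/* 0x2000 0x800 */
	return 0;
}

Built with any C compiler (e.g. gcc -o resize_demo resize_demo.c), the sketch
prints the emptied state first and then the re-spanned zone, which is the
empty-then-refill transition that move_pfn_range_left()/move_pfn_range_right()
now have to cope with.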