From f0bc0a60b13f209df16062f94e9fb4b90dc08708 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Thu, 13 Jan 2011 15:45:50 -0800
Subject: [PATCH] vmscan: factor out kswapd sleeping logic from kswapd()

Currently, kswapd() has deep nesting and is slightly hard to read.  Clean
this up.

Signed-off-by: KOSAKI Motohiro
Cc: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 92 ++++++++++++++++++++++++++---------------------------
 1 file changed, 46 insertions(+), 46 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 86f8c341879..cacdf668497 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2371,6 +2371,50 @@ out:
 	return sc.nr_reclaimed;
 }
 
+static void kswapd_try_to_sleep(pg_data_t *pgdat, int order)
+{
+	long remaining = 0;
+	DEFINE_WAIT(wait);
+
+	if (freezing(current) || kthread_should_stop())
+		return;
+
+	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/* Try to sleep for a short interval */
+	if (!sleeping_prematurely(pgdat, order, remaining)) {
+		remaining = schedule_timeout(HZ/10);
+		finish_wait(&pgdat->kswapd_wait, &wait);
+		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+	}
+
+	/*
+	 * After a short sleep, check if it was a premature sleep. If not, then
+	 * go fully to sleep until explicitly woken up.
+	 */
+	if (!sleeping_prematurely(pgdat, order, remaining)) {
+		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
+
+		/*
+		 * vmstat counters are not perfectly accurate and the estimated
+		 * value for counters such as NR_FREE_PAGES can deviate from the
+		 * true value by nr_online_cpus * threshold. To avoid the zone
+		 * watermarks being breached while under pressure, we reduce the
+		 * per-cpu vmstat threshold while kswapd is awake and restore
+		 * them before going back to sleep.
+		 */
+		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
+		schedule();
+		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
+	} else {
+		if (remaining)
+			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+		else
+			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
+	}
+	finish_wait(&pgdat->kswapd_wait, &wait);
+}
+
 /*
  * The background pageout daemon, started as a kernel thread
  * from the init process.
@@ -2389,7 +2433,7 @@ static int kswapd(void *p)
 	unsigned long order;
 	pg_data_t *pgdat = (pg_data_t*)p;
 	struct task_struct *tsk = current;
-	DEFINE_WAIT(wait);
+
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
@@ -2421,7 +2465,6 @@ static int kswapd(void *p)
 		unsigned long new_order;
 		int ret;
 
-		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
 		new_order = pgdat->kswapd_max_order;
 		pgdat->kswapd_max_order = 0;
 		if (order < new_order) {
@@ -2431,52 +2474,9 @@ static int kswapd(void *p)
 			 */
 			order = new_order;
 		} else {
-			if (!freezing(current) && !kthread_should_stop()) {
-				long remaining = 0;
-
-				/* Try to sleep for a short interval */
-				if (!sleeping_prematurely(pgdat, order, remaining)) {
-					remaining = schedule_timeout(HZ/10);
-					finish_wait(&pgdat->kswapd_wait, &wait);
-					prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
-				}
-
-				/*
-				 * After a short sleep, check if it was a
-				 * premature sleep. If not, then go fully
-				 * to sleep until explicitly woken up
-				 */
-				if (!sleeping_prematurely(pgdat, order, remaining)) {
-					trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
-
-					/*
-					 * vmstat counters are not perfectly
-					 * accurate and the estimated value
-					 * for counters such as NR_FREE_PAGES
-					 * can deviate from the true value by
-					 * nr_online_cpus * threshold. To
-					 * avoid the zone watermarks being
-					 * breached while under pressure, we
-					 * reduce the per-cpu vmstat threshold
-					 * while kswapd is awake and restore
-					 * them before going back to sleep.
-					 */
-					set_pgdat_percpu_threshold(pgdat,
-							calculate_normal_threshold);
-					schedule();
-					set_pgdat_percpu_threshold(pgdat,
-							calculate_pressure_threshold);
-				} else {
-					if (remaining)
-						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
-					else
-						count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
-				}
-			}
-
+			kswapd_try_to_sleep(pgdat, order);
 			order = pgdat->kswapd_max_order;
 		}
-		finish_wait(&pgdat->kswapd_wait, &wait);
 
 		ret = try_to_freeze();
 		if (kthread_should_stop())
-- 
2.20.1
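
For reference, a simplified sketch (not part of the patch) of the shape kswapd()'s
main loop takes once the sleeping logic lives in the helper. It is reconstructed
only from the hunks above; freezer handling and the reclaim work that follows the
loop head are abbreviated.

	for ( ; ; ) {
		unsigned long new_order;
		int ret;

		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/* A larger request is pending; reclaim for it directly. */
			order = new_order;
		} else {
			/* Nothing larger queued: nap, or sleep until woken. */
			kswapd_try_to_sleep(pgdat, order);
			order = pgdat->kswapd_max_order;
		}

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;
		/* ... reclaim work for 'order' continues here ... */
	}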