mm: mm_event: add reclaim stat
author Minchan Kim <minchan@google.com>
Wed, 27 Jun 2018 13:04:07 +0000 (22:04 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:24:01 +0000 (20:24 +0300)
This patch adds a page reclaim mm_event stat so that we can keep
tracking the [avg|max]_latency of handling the event as well as the
count of the event.

Direct reclaim latency is usually the most common source of latency
under memory pressure, so we need to track it to hunt down
application jank problems.
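
The mm_event_start()/mm_event_end() helpers come from the mm_event
infrastructure introduced earlier in this series and are not part of
this diff. For context, a minimal sketch of the kind of per-task
count/avg/max accounting they are expected to do follows; the struct
layout, the field names and the current->mm_event[] array are
illustrative assumptions, not the actual implementation:

#include <linux/ktime.h>
#include <linux/sched.h>

/* Illustrative sketch only; the names below are assumptions. */
enum mm_event_type { MM_RECLAIM, MM_TYPE_NUM };

struct mm_event_task {
	unsigned long count;	/* how many times the event happened */
	u64 accm_lat;		/* accumulated latency, for avg_latency */
	u64 max_lat;		/* worst-case latency, for max_latency */
};

static inline void mm_event_start(ktime_t *time)
{
	*time = ktime_get();
}

static inline void mm_event_end(enum mm_event_type event, ktime_t start)
{
	u64 elapsed = ktime_us_delta(ktime_get(), start);
	/* assumed per-task array: struct mm_event_task mm_event[MM_TYPE_NUM] */
	struct mm_event_task *stat = &current->mm_event[event];

	stat->count++;
	stat->accm_lat += elapsed;
	if (elapsed > stat->max_lat)
		stat->max_lat = elapsed;
}

In both try_to_free_pages() and kswapd() below, the start/end pair
brackets only the actual reclaim work (do_try_to_free_pages() and
balance_pgdat()), so time kswapd spends sleeping does not inflate the
MM_RECLAIM latency.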

Mot-CRs-fixed: (CR)

Bug: 80168800
Change-Id: I215c3972f76389404da7c4806a776bf753daac01
Signed-off-by: Minchan Kim <minchan@google.com>
Reviewed-on: https://gerrit.mot.com/1453721
SLTApproved: Slta Waiver
SME-Granted: SME Approvals Granted
Tested-by: Jira Key
Reviewed-by: Xiangpo Zhao <zhaoxp3@motorola.com>
Submit-Approved: Jira Key

diff --git a/mm/vmscan.c b/mm/vmscan.c
index f73d6465eab8f18d2369279e76e97e97104e24be..a2b4f7864f4f1bb9887dcf847066bdecfb5b8328 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3058,6 +3058,7 @@ out:
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                gfp_t gfp_mask, nodemask_t *nodemask)
 {
+       ktime_t event_ts;
        unsigned long nr_reclaimed;
        struct scan_control sc = {
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
@@ -3079,6 +3080,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
        if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
                return 1;
 
+       mm_event_start(&event_ts);
        trace_mm_vmscan_direct_reclaim_begin(order,
                                sc.may_writepage,
                                sc.gfp_mask,
@@ -3087,6 +3089,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
        trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
+       mm_event_end(MM_RECLAIM, event_ts);
 
        return nr_reclaimed;
 }
@@ -3600,6 +3603,7 @@ static int kswapd(void *p)
        for ( ; ; ) {
                bool ret;
 
+               ktime_t event_ts;
                alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
                classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
 
@@ -3635,7 +3639,9 @@ kswapd_try_sleep:
                trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
                                                alloc_order);
                fs_reclaim_acquire(GFP_KERNEL);
+               mm_event_start(&event_ts);
                reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
+               mm_event_end(MM_RECLAIM, event_ts);
                fs_reclaim_release(GFP_KERNEL);
                if (reclaim_order < alloc_order)
                        goto kswapd_try_sleep;