unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
+	ktime_t event_ts;
	unsigned long nr_reclaimed;
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		/* ... */
	};

	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
		return 1;

+	mm_event_start(&event_ts);
	trace_mm_vmscan_direct_reclaim_begin(order,
				sc.may_writepage,
				sc.gfp_mask,
				sc.reclaim_idx);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
+	mm_event_end(MM_RECLAIM, event_ts);

	return nr_reclaimed;
}
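/*
 * For reference, a minimal sketch of what the timing helpers used above
 * could look like. This is an assumption about their shape, not the
 * actual mm_event implementation: the start helper snapshots a monotonic
 * timestamp, and the end helper records the elapsed time under the given
 * event type.
 */
static inline void mm_event_start(ktime_t *time)
{
	/* Remember when the reclaim event began. */
	*time = ktime_get();
}

static inline void mm_event_end(enum mm_event_type event, ktime_t start)
{
	/* Elapsed wall-clock time spent in reclaim, in microseconds. */
	s64 elapsed_us = ktime_us_delta(ktime_get(), start);

	/* mm_event_record() is a hypothetical per-event stats sink. */
	mm_event_record(event, elapsed_us);
}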
/* The same pattern brackets balance_pgdat() in kswapd()'s main loop: */
	for ( ; ; ) {
		bool ret;
+		ktime_t event_ts;

		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);

kswapd_try_sleep:
		/* ... */

		trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
						alloc_order);
		fs_reclaim_acquire(GFP_KERNEL);
+		mm_event_start(&event_ts);
		reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
+		mm_event_end(MM_RECLAIM, event_ts);
		fs_reclaim_release(GFP_KERNEL);

		if (reclaim_order < alloc_order)
			goto kswapd_try_sleep;
	}
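/*
 * Note the placement: the MM_RECLAIM window brackets only the reclaim
 * work itself (do_try_to_free_pages() and balance_pgdat()), so time
 * kswapd spends sleeping between wakeups is not charged to the event.
 */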