unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = zone_end_pfn(zone);
const bool sync = cc->mode != MIGRATE_ASYNC;
+ ktime_t event_ts;
cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
[...]
cc->last_migrated_pfn = 0;
+ mm_event_start(&event_ts);
trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
cc->free_pfn, end_pfn, sync);
[...]
}
out:
+ mm_event_end(MM_COMPACTION, event_ts);
/*
* Release free pages and update where the free scanner should restart,
* so we don't leave any returned pages behind in the next attempt.
*/
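
The first hunk instruments compact_zone(): event_ts joins the other locals, mm_event_start() is called immediately before trace_mm_compaction_begin(), and the matching mm_event_end() sits at the out: label, which the abort paths inside the migration loop reach via goto out, so the recorded interval covers both completed and aborted compaction runs. The second hunk applies the same bracketing to try_to_compact_pages(): the clock starts only after the early !may_perform_io bail-out (which returns COMPACT_SKIPPED untimed) and stops just before rc is returned.
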
struct zoneref *z;
struct zone *zone;
enum compact_result rc = COMPACT_SKIPPED;
+ ktime_t event_ts;
/*
* Check if the GFP flags allow compaction - GFP_NOIO is really
* tricky context because the migration might require IO
*/
if (!may_perform_io)
return COMPACT_SKIPPED;
+ mm_event_start(&event_ts);
trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
/* Compact each zone in the list */
[...]
break;
}
+ mm_event_end(MM_COMPACTION, event_ts);
return rc;
}
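
The mm_event_start()/mm_event_end() helpers and the MM_COMPACTION event type are defined elsewhere in the series and are not part of this excerpt. As a minimal sketch of the pattern they implement (the enum contents, the global counters, and the helper bodies below are illustrative assumptions, not the real mm_event interface), the pair amounts to taking a ktime_t snapshot when the timed section starts and folding the elapsed time into a per-event accumulator when it ends:

/*
 * Minimal sketch only: a start/end timing pair in the spirit of the call
 * sites above. The event enum and the global counters are illustrative
 * stand-ins, not the real mm_event implementation.
 */
#include <linux/atomic.h>
#include <linux/ktime.h>

enum mm_event_type {
	MM_COMPACTION,		/* the only event type used in this excerpt */
	MM_TYPE_NR,
};

static atomic64_t mm_event_total_us[MM_TYPE_NR];
static atomic64_t mm_event_count[MM_TYPE_NR];

static inline void mm_event_start(ktime_t *time)
{
	/* Snapshot the monotonic clock when the timed section begins. */
	*time = ktime_get();
}

static inline void mm_event_end(enum mm_event_type event, ktime_t start)
{
	/* Fold the elapsed microseconds into the per-event accumulator. */
	s64 elapsed = ktime_us_delta(ktime_get(), start);

	atomic64_add(elapsed, &mm_event_total_us[event]);
	atomic64_inc(&mm_event_count[event]);
}

With counters of this shape, average compaction latency falls out as mm_event_total_us[MM_COMPACTION] divided by mm_event_count[MM_COMPACTION]; whatever bookkeeping the real helpers do, the call sites bracket the work exactly as the two hunks above show.
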