From 87bba3c932721dd8cfdb629c9ebc8d171acc35f1 Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Thu, 17 Jan 2019 11:14:14 +0900
Subject: [PATCH] mm: synchronize period update interval

Wei pointed out that the period update is racy, so a reader could see a
partial update and potentially lose a ton of trace. To close the
period_ms race between updating and reading, use an rwlock to reduce
contention. To close the vmstat_period_ms race between updating and
reading, use vmstat_lock. This patch includes a small refactoring, too.

Mot-CRs-fixed: (CR)
Bug: 80168800
Change-Id: I7f84cff758b533b7881f47889c7662b743bc3c12
Signed-off-by: Minchan Kim
Reviewed-on: https://gerrit.mot.com/1453729
SLTApproved: Slta Waiver
SME-Granted: SME Approvals Granted
Tested-by: Jira Key
Reviewed-by: Xiangpo Zhao
Submit-Approved: Jira Key
---
 mm/mm_event.c | 58 +++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 37 insertions(+), 21 deletions(-)

diff --git a/mm/mm_event.c b/mm/mm_event.c
index 967f7d1e93ae..383d4bb6378a 100644
--- a/mm/mm_event.c
+++ b/mm/mm_event.c
@@ -8,11 +8,13 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/mm_event.h>
 /* msec */
-static unsigned long period_ms = 500;
-static unsigned long vmstat_period_ms = 1000;
-static DEFINE_SPINLOCK(vmstat_lock);
+static unsigned long period_ms __read_mostly = 500;
+static unsigned long vmstat_period_ms __read_mostly = 1000;
 static unsigned long vmstat_next_period;
 
+static DEFINE_SPINLOCK(vmstat_lock);
+static DEFINE_RWLOCK(period_lock);
+
 void mm_event_task_init(struct task_struct *tsk)
 {
 	memset(tsk->mm_event, 0, sizeof(tsk->mm_event));
@@ -24,12 +26,12 @@ static void record_vmstat(void)
 	int cpu;
 	struct mm_event_vmstat vmstat;
 
-	if (!time_is_before_eq_jiffies(vmstat_next_period))
+	if (time_is_after_jiffies(vmstat_next_period))
 		return;
 
 	/* Need double check under the lock */
 	spin_lock(&vmstat_lock);
-	if (!time_is_before_eq_jiffies(vmstat_next_period)) {
+	if (time_is_after_jiffies(vmstat_next_period)) {
 		spin_unlock(&vmstat_lock);
 		return;
 	}
@@ -73,23 +75,28 @@ static void record_vmstat(void)
 
 static void record_stat(void)
 {
-	if (time_is_before_eq_jiffies(current->next_period)) {
-		int i;
-		bool need_vmstat = false;
-
-		for (i = 0; i < MM_TYPE_NUM; i++) {
-			if (current->mm_event[i].count == 0)
-				continue;
-			if (i == MM_COMPACTION || i == MM_RECLAIM)
-				need_vmstat = true;
-			trace_mm_event_record(i, &current->mm_event[i]);
-			memset(&current->mm_event[i], 0,
-					sizeof(struct mm_event_task));
-		}
-		current->next_period = jiffies + msecs_to_jiffies(period_ms);
-		if (need_vmstat)
-			record_vmstat();
+	int i;
+	bool need_vmstat = false;
+
+	if (time_is_after_jiffies(current->next_period))
+		return;
+
+	read_lock(&period_lock);
+	current->next_period = jiffies + msecs_to_jiffies(period_ms);
+	read_unlock(&period_lock);
+
+	for (i = 0; i < MM_TYPE_NUM; i++) {
+		if (current->mm_event[i].count == 0)
+			continue;
+		if (i == MM_COMPACTION || i == MM_RECLAIM)
+			need_vmstat = true;
+		trace_mm_event_record(i, &current->mm_event[i]);
+		memset(&current->mm_event[i], 0,
+				sizeof(struct mm_event_task));
 	}
+
+	if (need_vmstat)
+		record_vmstat();
 }
 
 void mm_event_start(ktime_t *time)
@@ -121,13 +128,18 @@ static int period_ms_set(void *data, u64 val)
 	if (val < 1 || val > ULONG_MAX)
 		return -EINVAL;
 
+	write_lock(&period_lock);
 	period_ms = (unsigned long)val;
+	write_unlock(&period_lock);
 	return 0;
 }
 
 static int period_ms_get(void *data, u64 *val)
 {
+	read_lock(&period_lock);
 	*val = period_ms;
+	read_unlock(&period_lock);
+
 	return 0;
 }
 
@@ -136,13 +148,17 @@ static int vmstat_period_ms_set(void *data, u64 val)
 	if (val < 1 || val > ULONG_MAX)
 		return -EINVAL;
 
+	spin_lock(&vmstat_lock);
 	vmstat_period_ms = (unsigned long)val;
+	spin_unlock(&vmstat_lock);
 	return 0;
 }
 
 static int vmstat_period_ms_get(void *data, u64 *val)
 {
+	spin_lock(&vmstat_lock);
 	*val = vmstat_period_ms;
+	spin_unlock(&vmstat_lock);
 	return 0;
 }
 
-- 
2.20.1
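
Note (not part of the patch): the change adopts a read-mostly locking pattern
for period_ms -- the hot path in record_stat() only takes the lock for read
while sampling the value, and the rare debugfs writer (period_ms_set) takes it
for write, so concurrent readers never block each other. Below is a minimal
userspace sketch of that same pattern, expressed with POSIX rwlocks rather
than the kernel's rwlock_t; the helper names sample_period() and set_period()
are illustrative only and do not appear in the kernel code.

/*
 * Minimal userspace sketch of the read-mostly rwlock pattern used for
 * period_ms. POSIX rwlocks stand in for the kernel's rwlock_t; names
 * here are hypothetical and for illustration only.
 */
#include <pthread.h>
#include <stdio.h>

static unsigned long period_ms = 500;	/* read-mostly value */
static pthread_rwlock_t period_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hot path: many readers may sample the period concurrently. */
static unsigned long sample_period(void)
{
	unsigned long val;

	pthread_rwlock_rdlock(&period_lock);
	val = period_ms;
	pthread_rwlock_unlock(&period_lock);
	return val;
}

/* Rare path: a single writer updates the period atomically w.r.t. readers. */
static void set_period(unsigned long val)
{
	pthread_rwlock_wrlock(&period_lock);
	period_ms = val;
	pthread_rwlock_unlock(&period_lock);
}

int main(void)
{
	set_period(1000);
	printf("period_ms=%lu\n", sample_period());
	return 0;
}

vmstat_period_ms, by contrast, is updated and read only on slow paths, so the
patch simply reuses the existing vmstat_lock spinlock around both accesses
instead of introducing another lock.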