mm: synchronize period update interval
author Minchan Kim <minchan@google.com>
Thu, 17 Jan 2019 02:14:14 +0000 (11:14 +0900)
committer PDO SCM Team <hudsoncm@motorola.com>
Fri, 15 Nov 2019 06:58:52 +0000 (00:58 -0600)
Wei pointed out that the period update is racy, so a reader could
observe a partial update and potentially lose a large amount of trace
data.

To close the race on period_ms between updating and reading, use a
rwlock, which keeps contention low on the read-mostly fast path.
To close the race on vmstat_period_ms between updating and reading,
use vmstat_lock.
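
As an illustration, here is a minimal, self-contained sketch of the
locking pattern applied here (the example_* names are made up for the
sketch; the real code is in mm/mm_event.c below):

    #include <linux/cache.h>
    #include <linux/jiffies.h>
    #include <linux/spinlock.h>

    static unsigned long example_period_ms __read_mostly = 500;
    static DEFINE_RWLOCK(example_period_lock);

    /* Hot path: many readers may hold the lock concurrently. */
    static unsigned long example_next_period(void)
    {
            unsigned long next;

            read_lock(&example_period_lock);
            next = jiffies + msecs_to_jiffies(example_period_ms);
            read_unlock(&example_period_lock);

            return next;
    }

    /* Rare update: exclusive, so readers never see a torn value. */
    static void example_set_period_ms(unsigned long ms)
    {
            write_lock(&example_period_lock);
            example_period_ms = ms;
            write_unlock(&example_period_lock);
    }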

This patch also includes some small refactoring.

Mot-CRs-fixed: (CR)

Bug: 80168800
Change-Id: I7f84cff758b533b7881f47889c7662b743bc3c12
Signed-off-by: Minchan Kim <minchan@google.com>
Reviewed-on: https://gerrit.mot.com/1453729
SLTApproved: Slta Waiver
SME-Granted: SME Approvals Granted
Tested-by: Jira Key
Reviewed-by: Xiangpo Zhao <zhaoxp3@motorola.com>
Submit-Approved: Jira Key

mm/mm_event.c

index 967f7d1e93aec2c3a8e519a75b5955d8043f40a9..383d4bb6378a23bfb581e319e11b46e048a1cef2 100644
@@ -8,11 +8,13 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/mm_event.h>
 /* msec */
-static unsigned long period_ms = 500;
-static unsigned long vmstat_period_ms = 1000;
-static DEFINE_SPINLOCK(vmstat_lock);
+static unsigned long period_ms __read_mostly = 500;
+static unsigned long vmstat_period_ms __read_mostly = 1000;
 static unsigned long vmstat_next_period;
 
+static DEFINE_SPINLOCK(vmstat_lock);
+static DEFINE_RWLOCK(period_lock);
+
 void mm_event_task_init(struct task_struct *tsk)
 {
        memset(tsk->mm_event, 0, sizeof(tsk->mm_event));
@@ -24,12 +26,12 @@ static void record_vmstat(void)
        int cpu;
        struct mm_event_vmstat vmstat;
 
-       if (!time_is_before_eq_jiffies(vmstat_next_period))
+       if (time_is_after_jiffies(vmstat_next_period))
                return;
 
        /* Need double check under the lock */
        spin_lock(&vmstat_lock);
-       if (!time_is_before_eq_jiffies(vmstat_next_period)) {
+       if (time_is_after_jiffies(vmstat_next_period)) {
                spin_unlock(&vmstat_lock);
                return;
        }
@@ -73,23 +75,28 @@ static void record_vmstat(void)
 
 static void record_stat(void)
 {
-       if (time_is_before_eq_jiffies(current->next_period)) {
-               int i;
-               bool need_vmstat = false;
-
-               for (i = 0; i < MM_TYPE_NUM; i++) {
-                       if (current->mm_event[i].count == 0)
-                               continue;
-                       if (i == MM_COMPACTION || i == MM_RECLAIM)
-                               need_vmstat = true;
-                       trace_mm_event_record(i, &current->mm_event[i]);
-                       memset(&current->mm_event[i], 0,
-                                       sizeof(struct mm_event_task));
-               }
-               current->next_period = jiffies + msecs_to_jiffies(period_ms);
-               if (need_vmstat)
-                       record_vmstat();
+       int i;
+       bool need_vmstat = false;
+
+       if (time_is_after_jiffies(current->next_period))
+               return;
+
+       read_lock(&period_lock);
+       current->next_period = jiffies + msecs_to_jiffies(period_ms);
+       read_unlock(&period_lock);
+
+       for (i = 0; i < MM_TYPE_NUM; i++) {
+               if (current->mm_event[i].count == 0)
+                       continue;
+               if (i == MM_COMPACTION || i == MM_RECLAIM)
+                       need_vmstat = true;
+               trace_mm_event_record(i, &current->mm_event[i]);
+               memset(&current->mm_event[i], 0,
+                               sizeof(struct mm_event_task));
        }
+
+       if (need_vmstat)
+               record_vmstat();
 }
 
 void mm_event_start(ktime_t *time)
@@ -121,13 +128,18 @@ static int period_ms_set(void *data, u64 val)
        if (val < 1 || val > ULONG_MAX)
                return -EINVAL;
 
+       write_lock(&period_lock);
        period_ms = (unsigned long)val;
+       write_unlock(&period_lock);
        return 0;
 }
 
 static int period_ms_get(void *data, u64 *val)
 {
+       read_lock(&period_lock);
        *val = period_ms;
+       read_unlock(&period_lock);
+
        return 0;
 }
 
@@ -136,13 +148,17 @@ static int vmstat_period_ms_set(void *data, u64 val)
        if (val < 1 || val > ULONG_MAX)
                return -EINVAL;
 
+       spin_lock(&vmstat_lock);
        vmstat_period_ms = (unsigned long)val;
+       spin_unlock(&vmstat_lock);
        return 0;
 }
 
 static int vmstat_period_ms_get(void *data, u64 *val)
 {
+       spin_lock(&vmstat_lock);
        *val = vmstat_period_ms;
+       spin_unlock(&vmstat_lock);
        return 0;
 }
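
For reference, handler pairs with these signatures are commonly exposed
as debugfs attributes; the sketch below shows one typical way such
wiring looks (the attribute name, directory name and init hook are
assumptions made for illustration; the actual debugfs wiring in
mm_event.c is outside this hunk):

    #include <linux/debugfs.h>
    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/init.h>

    /* Hypothetical attribute bundling the period_ms get/set handlers. */
    DEFINE_SIMPLE_ATTRIBUTE(period_ms_ops, period_ms_get, period_ms_set,
                            "%llu\n");

    static int __init mm_event_debugfs_sketch_init(void)
    {
            /* Hypothetical debugfs directory holding the knobs. */
            struct dentry *root = debugfs_create_dir("mm_event", NULL);

            if (IS_ERR_OR_NULL(root))
                    return -ENOMEM;

            debugfs_create_file("period_ms", 0644, root, NULL,
                                &period_ms_ops);
            return 0;
    }
    late_initcall(mm_event_debugfs_sketch_init);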