mm: synchronize period update interval
[GitHub/MotorolaMobilityLLC/kernel-slsi.git] / mm / mm_event.c
1 #include <linux/mm.h>
2 #include <linux/mm_event.h>
3 #include <linux/sched.h>
4 #include <linux/vmalloc.h>
5 #include <linux/seq_file.h>
6 #include <linux/debugfs.h>
7
8 #define CREATE_TRACE_POINTS
9 #include <trace/events/mm_event.h>
/* msec */
static unsigned long period_ms __read_mostly = 500;
static unsigned long vmstat_period_ms __read_mostly = 1000;
/* jiffies timestamp of the next allowed vmstat snapshot (guarded by vmstat_lock) */
static unsigned long vmstat_next_period;

/* Serializes vmstat_period_ms / vmstat_next_period updates */
static DEFINE_SPINLOCK(vmstat_lock);
/* Readers sample period_ms in record_stat(); writer is the debugfs setter */
static DEFINE_RWLOCK(period_lock);
17
18 void mm_event_task_init(struct task_struct *tsk)
19 {
20 memset(tsk->mm_event, 0, sizeof(tsk->mm_event));
21 tsk->next_period = 0;
22 }
23
24 static void record_vmstat(void)
25 {
26 int cpu;
27 struct mm_event_vmstat vmstat;
28
29 if (time_is_after_jiffies(vmstat_next_period))
30 return;
31
32 /* Need double check under the lock */
33 spin_lock(&vmstat_lock);
34 if (time_is_after_jiffies(vmstat_next_period)) {
35 spin_unlock(&vmstat_lock);
36 return;
37 }
38 vmstat_next_period = jiffies + msecs_to_jiffies(vmstat_period_ms);
39 spin_unlock(&vmstat_lock);
40
41 memset(&vmstat, 0, sizeof(vmstat));
42 vmstat.free = global_zone_page_state(NR_FREE_PAGES);
43 vmstat.slab = global_node_page_state(NR_SLAB_RECLAIMABLE) +
44 global_node_page_state(NR_SLAB_UNRECLAIMABLE);
45
46 vmstat.file = global_node_page_state(NR_ACTIVE_FILE) +
47 global_node_page_state(NR_INACTIVE_FILE);
48 vmstat.anon = global_node_page_state(NR_ACTIVE_ANON) +
49 global_node_page_state(NR_INACTIVE_ANON);
50
51 vmstat.ws_refault = global_node_page_state(WORKINGSET_REFAULT);
52 vmstat.ws_activate = global_node_page_state(WORKINGSET_ACTIVATE);
53 vmstat.mapped = global_node_page_state(NR_FILE_MAPPED);
54
55 /* No want to make lock dependency between vmstat_lock and hotplug */
56 get_online_cpus();
57 for_each_online_cpu(cpu) {
58 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
59
60 /* sectors to kbytes for PGPGIN/PGPGOUT */
61 vmstat.pgin += this->event[PGPGIN] / 2;
62 vmstat.pgout += this->event[PGPGOUT] / 2;
63 vmstat.swpin += this->event[PSWPIN];
64 vmstat.swpout += this->event[PSWPOUT];
65 vmstat.reclaim_steal += this->event[PGSTEAL_DIRECT] +
66 this->event[PGSTEAL_KSWAPD];
67 vmstat.reclaim_scan += this->event[PGSCAN_DIRECT] +
68 this->event[PGSCAN_KSWAPD];
69 vmstat.compact_scan += this->event[COMPACTFREE_SCANNED] +
70 this->event[COMPACTFREE_SCANNED];
71 }
72 put_online_cpus();
73 trace_mm_event_vmstat_record(&vmstat);
74 }
75
76 static void record_stat(void)
77 {
78 int i;
79 bool need_vmstat = false;
80
81 if (time_is_after_jiffies(current->next_period))
82 return;
83
84 read_lock(&period_lock);
85 current->next_period = jiffies + msecs_to_jiffies(period_ms);
86 read_unlock(&period_lock);
87
88 for (i = 0; i < MM_TYPE_NUM; i++) {
89 if (current->mm_event[i].count == 0)
90 continue;
91 if (i == MM_COMPACTION || i == MM_RECLAIM)
92 need_vmstat = true;
93 trace_mm_event_record(i, &current->mm_event[i]);
94 memset(&current->mm_event[i], 0,
95 sizeof(struct mm_event_task));
96 }
97
98 if (need_vmstat)
99 record_vmstat();
100 }
101
102 void mm_event_start(ktime_t *time)
103 {
104 *time = ktime_get();
105 }
106
107 void mm_event_end(enum mm_event_type event, ktime_t start)
108 {
109 s64 elapsed = ktime_us_delta(ktime_get(), start);
110
111 current->mm_event[event].count++;
112 current->mm_event[event].accm_lat += elapsed;
113 if (elapsed > current->mm_event[event].max_lat)
114 current->mm_event[event].max_lat = elapsed;
115 record_stat();
116 }
117
118 void mm_event_count(enum mm_event_type event, int count)
119 {
120 current->mm_event[event].count += count;
121 record_stat();
122 }
123
124 static struct dentry *mm_event_root;
125
126 static int period_ms_set(void *data, u64 val)
127 {
128 if (val < 1 || val > ULONG_MAX)
129 return -EINVAL;
130
131 write_lock(&period_lock);
132 period_ms = (unsigned long)val;
133 write_unlock(&period_lock);
134 return 0;
135 }
136
137 static int period_ms_get(void *data, u64 *val)
138 {
139 read_lock(&period_lock);
140 *val = period_ms;
141 read_unlock(&period_lock);
142
143 return 0;
144 }
145
146 static int vmstat_period_ms_set(void *data, u64 val)
147 {
148 if (val < 1 || val > ULONG_MAX)
149 return -EINVAL;
150
151 spin_lock(&vmstat_lock);
152 vmstat_period_ms = (unsigned long)val;
153 spin_unlock(&vmstat_lock);
154 return 0;
155 }
156
157 static int vmstat_period_ms_get(void *data, u64 *val)
158 {
159 spin_lock(&vmstat_lock);
160 *val = vmstat_period_ms;
161 spin_unlock(&vmstat_lock);
162 return 0;
163 }
164
/* debugfs file_operations wrapping the locked getters/setters above */
DEFINE_SIMPLE_ATTRIBUTE(period_ms_operations, period_ms_get,
			period_ms_set, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vmstat_period_ms_operations, vmstat_period_ms_get,
			vmstat_period_ms_set, "%llu\n");
169
170 static int __init mm_event_init(void)
171 {
172 struct dentry *entry;
173
174 mm_event_root = debugfs_create_dir("mm_event", NULL);
175 if (!mm_event_root) {
176 pr_warn("debugfs dir <mm_event> creation failed\n");
177 return PTR_ERR(mm_event_root);
178 }
179
180 entry = debugfs_create_file("period_ms", 0644,
181 mm_event_root, NULL, &period_ms_operations);
182
183 if (IS_ERR(entry)) {
184 pr_warn("debugfs file mm_event_task creation failed\n");
185 debugfs_remove_recursive(mm_event_root);
186 return PTR_ERR(entry);
187 }
188
189 entry = debugfs_create_file("vmstat_period_ms", 0644,
190 mm_event_root, NULL, &vmstat_period_ms_operations);
191 if (IS_ERR(entry)) {
192 pr_warn("debugfs file vmstat_mm_event_task creation failed\n");
193 debugfs_remove_recursive(mm_event_root);
194 return PTR_ERR(entry);
195 }
196
197 return 0;
198 }
199 subsys_initcall(mm_event_init);