mm/mm_event.c (MotorolaMobilityLLC/kernel-slsi)
#include <linux/mm.h>
#include <linux/mm_event.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mm_event.h>

/* msec */
static unsigned long period_ms __read_mostly = 500;
static unsigned long vmstat_period_ms __read_mostly = 1000;
static unsigned long vmstat_next_period;

static DEFINE_SPINLOCK(vmstat_lock);
static DEFINE_RWLOCK(period_lock);

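/* Reset the per-task mm_event counters and the per-task reporting deadline. */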
void mm_event_task_init(struct task_struct *tsk)
{
        memset(tsk->mm_event, 0, sizeof(tsk->mm_event));
        tsk->next_period = 0;
}

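/*
 * Snapshot system-wide memory statistics at most once per vmstat_period_ms
 * and emit them through the mm_event_vmstat_record tracepoint.
 */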
static void record_vmstat(void)
{
        int cpu;
        struct mm_event_vmstat vmstat;

        if (time_is_after_jiffies(vmstat_next_period))
                return;

        /* Need double check under the lock */
        spin_lock(&vmstat_lock);
        if (time_is_after_jiffies(vmstat_next_period)) {
                spin_unlock(&vmstat_lock);
                return;
        }
        vmstat_next_period = jiffies + msecs_to_jiffies(vmstat_period_ms);
        spin_unlock(&vmstat_lock);

        memset(&vmstat, 0, sizeof(vmstat));
        vmstat.free = global_zone_page_state(NR_FREE_PAGES);
        vmstat.slab = global_node_page_state(NR_SLAB_RECLAIMABLE) +
                        global_node_page_state(NR_SLAB_UNRECLAIMABLE);

        vmstat.file = global_node_page_state(NR_ACTIVE_FILE) +
                        global_node_page_state(NR_INACTIVE_FILE);
        vmstat.anon = global_node_page_state(NR_ACTIVE_ANON) +
                        global_node_page_state(NR_INACTIVE_ANON);

        vmstat.ws_refault = global_node_page_state(WORKINGSET_REFAULT);
        vmstat.ws_activate = global_node_page_state(WORKINGSET_ACTIVATE);
        vmstat.mapped = global_node_page_state(NR_FILE_MAPPED);

        for_each_online_cpu(cpu) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

                /* sectors to kbytes for PGPGIN/PGPGOUT */
                vmstat.pgin += this->event[PGPGIN] / 2;
                vmstat.pgout += this->event[PGPGOUT] / 2;
                vmstat.swpin += this->event[PSWPIN];
                vmstat.swpout += this->event[PSWPOUT];
                vmstat.reclaim_steal += this->event[PGSTEAL_DIRECT] +
                                        this->event[PGSTEAL_KSWAPD];
                vmstat.reclaim_scan += this->event[PGSCAN_DIRECT] +
                                        this->event[PGSCAN_KSWAPD];
                vmstat.compact_scan += this->event[COMPACTFREE_SCANNED] +
                                        this->event[COMPACTMIGRATE_SCANNED];
        }
        trace_mm_event_vmstat_record(&vmstat);
}

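/*
 * Flush the current task's accumulated mm_event stats to the mm_event_record
 * tracepoint at most once per period_ms.  A vmstat snapshot is also taken
 * when compaction or reclaim activity was seen in the flushed window.
 */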
static void record_stat(void)
{
        int i;
        bool need_vmstat = false;

        if (time_is_after_jiffies(current->next_period))
                return;

        read_lock(&period_lock);
        current->next_period = jiffies + msecs_to_jiffies(period_ms);
        read_unlock(&period_lock);

        for (i = 0; i < MM_TYPE_NUM; i++) {
                if (current->mm_event[i].count == 0)
                        continue;
                if (i == MM_COMPACTION || i == MM_RECLAIM)
                        need_vmstat = true;
                trace_mm_event_record(i, &current->mm_event[i]);
                memset(&current->mm_event[i], 0,
                                sizeof(struct mm_event_task));
        }

        if (need_vmstat)
                record_vmstat();
}

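/*
 * mm_event_start()/mm_event_end() bracket an operation: the caller records a
 * start timestamp, and on completion the elapsed time (in microseconds) is
 * accumulated into the per-task stats for the given event type.
 */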
void mm_event_start(ktime_t *time)
{
        *time = ktime_get();
}

void mm_event_end(enum mm_event_type event, ktime_t start)
{
        s64 elapsed = ktime_us_delta(ktime_get(), start);

        current->mm_event[event].count++;
        current->mm_event[event].accm_lat += elapsed;
        if (elapsed > current->mm_event[event].max_lat)
                current->mm_event[event].max_lat = elapsed;
        record_stat();
}

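/* Bump the per-task counter for @event by @count without timing it. */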
void mm_event_count(enum mm_event_type event, int count)
{
        current->mm_event[event].count += count;
        record_stat();
}

static struct dentry *mm_event_root;

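/*
 * debugfs knobs: period_ms and vmstat_period_ms control how often the
 * per-task stats and the system-wide vmstat snapshot are emitted.
 */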
static int period_ms_set(void *data, u64 val)
{
        if (val < 1 || val > ULONG_MAX)
                return -EINVAL;

        write_lock(&period_lock);
        period_ms = (unsigned long)val;
        write_unlock(&period_lock);
        return 0;
}

static int period_ms_get(void *data, u64 *val)
{
        read_lock(&period_lock);
        *val = period_ms;
        read_unlock(&period_lock);

        return 0;
}

static int vmstat_period_ms_set(void *data, u64 val)
{
        if (val < 1 || val > ULONG_MAX)
                return -EINVAL;

        spin_lock(&vmstat_lock);
        vmstat_period_ms = (unsigned long)val;
        spin_unlock(&vmstat_lock);
        return 0;
}

static int vmstat_period_ms_get(void *data, u64 *val)
{
        spin_lock(&vmstat_lock);
        *val = vmstat_period_ms;
        spin_unlock(&vmstat_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(period_ms_operations, period_ms_get,
                        period_ms_set, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(vmstat_period_ms_operations, vmstat_period_ms_get,
                        vmstat_period_ms_set, "%llu\n");

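/* Create the mm_event debugfs directory and its period_ms / vmstat_period_ms files. */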
static int __init mm_event_init(void)
{
        struct dentry *entry;

        mm_event_root = debugfs_create_dir("mm_event", NULL);
        if (!mm_event_root) {
                pr_warn("debugfs dir <mm_event> creation failed\n");
                return -ENOMEM;
        }

        entry = debugfs_create_file("period_ms", 0644,
                        mm_event_root, NULL, &period_ms_operations);

        if (IS_ERR(entry)) {
                pr_warn("debugfs file period_ms creation failed\n");
                debugfs_remove_recursive(mm_event_root);
                return PTR_ERR(entry);
        }

        entry = debugfs_create_file("vmstat_period_ms", 0644,
                        mm_event_root, NULL, &vmstat_period_ms_operations);
        if (IS_ERR(entry)) {
                pr_warn("debugfs file vmstat_period_ms creation failed\n");
                debugfs_remove_recursive(mm_event_root);
                return PTR_ERR(entry);
        }

        return 0;
}
subsys_initcall(mm_event_init);
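For context, a minimal sketch of how a caller elsewhere in the kernel might use this API. The wrapper function below is hypothetical and is not part of mm/mm_event.c; it only illustrates the start/end pairing. Real call sites instrument existing MM paths and pick the event type (MM_RECLAIM, MM_COMPACTION, ...) that matches the work being timed.

/* Hypothetical call site, shown only to illustrate the mm_event API. */
#include <linux/mm_event.h>

static void timed_reclaim_example(void)
{
        ktime_t start;

        mm_event_start(&start);
        /* ... the reclaim work being measured ... */
        mm_event_end(MM_RECLAIM, start);        /* accumulates count and latency */
}

mm_event_end() then calls record_stat(), which rate-limits how often the accumulated numbers are pushed out to the mm_event_record tracepoint.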