/*
 * kernel/power/tuxonice_alloc.c
 *
 * Copyright (C) 2008-2010 Nigel Cunningham (nigel at tuxonice net)
 *
 * This file is released under the GPLv2.
 *
 */

#ifdef CONFIG_PM_DEBUG
#include <linux/export.h>
#include <linux/slab.h>
#include "tuxonice_modules.h"
#include "tuxonice_alloc.h"
#include "tuxonice_sysfs.h"
#include "tuxonice.h"

#define TOI_ALLOC_PATHS 40

static DEFINE_MUTEX(toi_alloc_mutex);

static struct toi_module_ops toi_alloc_ops;

static int toi_fail_num;

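/*
 * Per-path accounting: how many allocations, frees, injected test failures
 * and real failures have been seen for each allocation path, plus per-path
 * outstanding allocation counts and the overall current/peak bytes
 * allocated. The latter are only maintained while TOI_GET_MAX_MEM_ALLOCD
 * is set.
 */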
static atomic_t toi_alloc_count[TOI_ALLOC_PATHS],
                toi_free_count[TOI_ALLOC_PATHS],
                toi_test_count[TOI_ALLOC_PATHS], toi_fail_count[TOI_ALLOC_PATHS];
static int toi_cur_allocd[TOI_ALLOC_PATHS], toi_max_allocd[TOI_ALLOC_PATHS];
static int cur_allocd, max_allocd;

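/* Human-readable name for each allocation path, indexed by fail_num. */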
static char *toi_alloc_desc[TOI_ALLOC_PATHS] = {
        "", /* 0 */
        "get_io_info_struct",
        "extent",
        "extent (loading chain)",
        "userui channel",
        "userui arg", /* 5 */
        "attention list metadata",
        "extra pagedir memory metadata",
        "bdev metadata",
        "extra pagedir memory",
        "header_locations_read", /* 10 */
        "bio queue",
        "prepare_readahead",
        "i/o buffer",
        "writer buffer in bio_init",
        "checksum buffer", /* 15 */
        "compression buffer",
        "filewriter signature op",
        "set resume param alloc1",
        "set resume param alloc2",
        "debugging info buffer", /* 20 */
        "check can resume buffer",
        "write module config buffer",
        "read module config buffer",
        "write image header buffer",
        "read pageset1 buffer", /* 25 */
        "get_have_image_data buffer",
        "checksum page",
        "worker rw loop",
        "get nonconflicting page",
        "ps1 load addresses", /* 30 */
        "remove swap image",
        "swap image exists",
        "swap parse sig location",
        "sysfs kobj",
        "swap mark resume attempted buffer", /* 35 */
        "cluster member",
        "boot kernel data buffer",
        "setting swap signature",
        "block i/o bdev struct"
};

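/*
 * MIGHT_FAIL - fault-injection hook. If the caller's allocation path matches
 * the path selected via the "failure_test" sysfs entry, count the test and
 * return FAIL_VAL instead of attempting the real allocation. The trigger is
 * one-shot: toi_fail_num is cleared after it fires.
 */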
#define MIGHT_FAIL(FAIL_NUM, FAIL_VAL) \
        do { \
                BUG_ON(FAIL_NUM >= TOI_ALLOC_PATHS); \
                \
                if (FAIL_NUM == toi_fail_num) { \
                        atomic_inc(&toi_test_count[FAIL_NUM]); \
                        toi_fail_num = 0; \
                        return FAIL_VAL; \
                } \
        } while (0)

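/*
 * alloc_update_stats - record the outcome of an allocation on a given path.
 * Failed allocations bump the failure counter; successful ones bump the
 * allocation counter and, when TOI_GET_MAX_MEM_ALLOCD is set, update the
 * per-path and global high-water marks under toi_alloc_mutex.
 */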
static void alloc_update_stats(int fail_num, void *result, int size)
{
        if (!result) {
                atomic_inc(&toi_fail_count[fail_num]);
                return;
        }

        atomic_inc(&toi_alloc_count[fail_num]);
        if (unlikely(test_action_state(TOI_GET_MAX_MEM_ALLOCD))) {
                mutex_lock(&toi_alloc_mutex);
                toi_cur_allocd[fail_num]++;
                cur_allocd += size;
                if (unlikely(cur_allocd > max_allocd)) {
                        int i;

                        for (i = 0; i < TOI_ALLOC_PATHS; i++)
                                toi_max_allocd[i] = toi_cur_allocd[i];
                        max_allocd = cur_allocd;
                }
                mutex_unlock(&toi_alloc_mutex);
        }
}

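/*
 * free_update_stats - record a free on a given path. A dump_stack() is
 * emitted if a path has now freed more objects than it ever allocated,
 * which points at an unbalanced toi_* alloc/free pair.
 */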
static void free_update_stats(int fail_num, int size)
{
        BUG_ON(fail_num >= TOI_ALLOC_PATHS);
        atomic_inc(&toi_free_count[fail_num]);
        if (unlikely(atomic_read(&toi_free_count[fail_num]) >
                        atomic_read(&toi_alloc_count[fail_num])))
                dump_stack();
        if (unlikely(test_action_state(TOI_GET_MAX_MEM_ALLOCD))) {
                mutex_lock(&toi_alloc_mutex);
                cur_allocd -= size;
                toi_cur_allocd[fail_num]--;
                mutex_unlock(&toi_alloc_mutex);
        }
}

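/*
 * toi_kzalloc - kzalloc() wrapper that feeds the fault-injection and
 * accounting machinery above. fail_num identifies the allocation path (see
 * toi_alloc_desc); tracing a path via the "trace" sysfs entry dumps the
 * stack for every allocation made on it.
 */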
void *toi_kzalloc(int fail_num, size_t size, gfp_t flags)
{
        void *result;

        if (toi_alloc_ops.enabled)
                MIGHT_FAIL(fail_num, NULL);
        result = kzalloc(size, flags);
        if (toi_alloc_ops.enabled)
                alloc_update_stats(fail_num, result, size);
        if (fail_num == toi_trace_allocs)
                dump_stack();
        return result;
}
EXPORT_SYMBOL_GPL(toi_kzalloc);

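/*
 * Page-level wrappers: the same pattern as toi_kzalloc, applied to
 * __get_free_pages(), alloc_page() and get_zeroed_page(), accounting
 * PAGE_SIZE << order (or PAGE_SIZE) bytes per allocation.
 */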
unsigned long toi_get_free_pages(int fail_num, gfp_t mask, unsigned int order)
{
        unsigned long result;

        if (toi_alloc_ops.enabled)
                MIGHT_FAIL(fail_num, 0);
        result = __get_free_pages(mask, order);
        if (toi_alloc_ops.enabled)
                alloc_update_stats(fail_num, (void *)result, PAGE_SIZE << order);
        if (fail_num == toi_trace_allocs)
                dump_stack();
        return result;
}
EXPORT_SYMBOL_GPL(toi_get_free_pages);

struct page *toi_alloc_page(int fail_num, gfp_t mask)
{
        struct page *result;

        if (toi_alloc_ops.enabled)
                MIGHT_FAIL(fail_num, NULL);
        result = alloc_page(mask);
        if (toi_alloc_ops.enabled)
                alloc_update_stats(fail_num, (void *)result, PAGE_SIZE);
        if (fail_num == toi_trace_allocs)
                dump_stack();
        return result;
}
EXPORT_SYMBOL_GPL(toi_alloc_page);

unsigned long toi_get_zeroed_page(int fail_num, gfp_t mask)
{
        unsigned long result;

        if (toi_alloc_ops.enabled)
                MIGHT_FAIL(fail_num, 0);
        result = get_zeroed_page(mask);
        if (toi_alloc_ops.enabled)
                alloc_update_stats(fail_num, (void *)result, PAGE_SIZE);
        if (fail_num == toi_trace_allocs)
                dump_stack();
        return result;
}
EXPORT_SYMBOL_GPL(toi_get_zeroed_page);

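/*
 * Matching free-side wrappers. Statistics are only updated for non-NULL
 * (or non-zero) arguments, mirroring the fact that freeing NULL is a no-op.
 */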
void toi_kfree(int fail_num, const void *arg, int size)
{
        if (arg && toi_alloc_ops.enabled)
                free_update_stats(fail_num, size);

        if (fail_num == toi_trace_allocs)
                dump_stack();
        kfree(arg);
}
EXPORT_SYMBOL_GPL(toi_kfree);

void toi_free_page(int fail_num, unsigned long virt)
{
        if (virt && toi_alloc_ops.enabled)
                free_update_stats(fail_num, PAGE_SIZE);

        if (fail_num == toi_trace_allocs)
                dump_stack();
        free_page(virt);
}
EXPORT_SYMBOL_GPL(toi_free_page);

void toi__free_page(int fail_num, struct page *page)
{
        if (page && toi_alloc_ops.enabled)
                free_update_stats(fail_num, PAGE_SIZE);

        if (fail_num == toi_trace_allocs)
                dump_stack();
        __free_page(page);
}
EXPORT_SYMBOL_GPL(toi__free_page);

void toi_free_pages(int fail_num, struct page *page, int order)
{
        if (page && toi_alloc_ops.enabled)
                free_update_stats(fail_num, PAGE_SIZE << order);

        if (fail_num == toi_trace_allocs)
                dump_stack();
        __free_pages(page, order);
}

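/*
 * toi_alloc_print_debug_stats - log every allocation path whose alloc and
 * free counts do not balance (i.e. potential leaks), together with its test
 * and failure counts and peak outstanding allocations.
 */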
void toi_alloc_print_debug_stats(void)
{
        int i, header_done = 0;

        if (!toi_alloc_ops.enabled)
                return;

        for (i = 0; i < TOI_ALLOC_PATHS; i++)
                if (atomic_read(&toi_alloc_count[i]) != atomic_read(&toi_free_count[i])) {
                        if (!header_done) {
                                printk(KERN_INFO "Idx  Allocs   Frees   Tests "
                                        "  Fails     Max Description\n");
                                header_done = 1;
                        }

                        printk(KERN_INFO "%3d %7d %7d %7d %7d %7d %s\n", i,
                                atomic_read(&toi_alloc_count[i]),
                                atomic_read(&toi_free_count[i]),
                                atomic_read(&toi_test_count[i]),
                                atomic_read(&toi_fail_count[i]),
                                toi_max_allocd[i], toi_alloc_desc[i]);
                }
}
EXPORT_SYMBOL_GPL(toi_alloc_print_debug_stats);

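/*
 * toi_alloc_initialise - module initialisation hook. At the start of a
 * hibernation cycle, reset all counters so the statistics cover just that
 * cycle.
 */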
static int toi_alloc_initialise(int starting_cycle)
{
        int i;

        if (!starting_cycle)
                return 0;

        if (toi_trace_allocs)
                dump_stack();

        for (i = 0; i < TOI_ALLOC_PATHS; i++) {
                atomic_set(&toi_alloc_count[i], 0);
                atomic_set(&toi_free_count[i], 0);
                atomic_set(&toi_test_count[i], 0);
                atomic_set(&toi_fail_count[i], 0);
                toi_cur_allocd[i] = 0;
                toi_max_allocd[i] = 0;
        }

        max_allocd = 0;
        cur_allocd = 0;
        return 0;
}

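/*
 * Sysfs knobs exposed by this module (under the TuxOnIce sysfs tree,
 * typically /sys/power/tuxonice/alloc/ given .directory = "alloc" below):
 * failure_test selects the next allocation path to fail, trace selects a
 * path whose allocations dump the stack, find_max_mem_allocated enables
 * high-water-mark tracking, and enabled switches the whole module on or off.
 */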
static struct toi_sysfs_data sysfs_params[] = {
        SYSFS_INT("failure_test", SYSFS_RW, &toi_fail_num, 0, 99, 0, NULL),
        SYSFS_INT("trace", SYSFS_RW, &toi_trace_allocs, 0, TOI_ALLOC_PATHS, 0,
                  NULL),
        SYSFS_BIT("find_max_mem_allocated", SYSFS_RW, &toi_bkd.toi_action,
                  TOI_GET_MAX_MEM_ALLOCD, 0),
        SYSFS_INT("enabled", SYSFS_RW, &toi_alloc_ops.enabled, 0, 1, 0,
                  NULL)
};

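/*
 * Example usage (a sketch, assuming the TuxOnIce sysfs tree sits at
 * /sys/power/tuxonice and this module's directory is "alloc"):
 *
 *   echo 1  > /sys/power/tuxonice/alloc/enabled       # turn accounting on
 *   echo 13 > /sys/power/tuxonice/alloc/failure_test  # fail the next
 *                                                     # "i/o buffer" alloc
 *   echo 13 > /sys/power/tuxonice/alloc/trace         # stack-dump that path
 */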
static struct toi_module_ops toi_alloc_ops = {
        .type = MISC_HIDDEN_MODULE,
        .name = "allocation debugging",
        .directory = "alloc",
        .module = THIS_MODULE,
        .early = 1,
        .initialise = toi_alloc_initialise,

        .sysfs_data = sysfs_params,
        .num_sysfs_entries = sizeof(sysfs_params) / sizeof(struct toi_sysfs_data),
};

int toi_alloc_init(void)
{
        return toi_register_module(&toi_alloc_ops);
}

void toi_alloc_exit(void)
{
        toi_unregister_module(&toi_alloc_ops);
}
#endif