/*
 * linux/kernel/profile.c
 * Simple profiling. Manages a direct-mapped profile hit count buffer,
 * with configurable resolution, support for restricting the cpus on
 * which profiling is done, and switching between cpu time and
 * schedule() calls via kernel command line parameters passed at boot.
 *
 * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 * Red Hat, July 2004
 * Consolidation of architecture support code for profiling,
 * William Irwin, Oracle, July 2004
 * Amortized hit count accounting via per-cpu open-addressed hashtables
 * to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)

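/*
 * Worked example (assuming 4 KiB pages): sizeof(struct profile_hit) is
 * 8 bytes, so NR_PROFILE_HIT = 4096/8 = 512 entries per per-cpu page,
 * split into NR_PROFILE_GRP = 512/8 = 64 groups of PROFILE_GRPSZ = 8
 * slots each. The group is the unit probed linearly by profile_hits().
 */
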
/* Oprofile timer tick hook */
static int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
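
/*
 * Example usage (see Documentation/kernel-parameters.txt): booting with
 * "profile=2" enables CPU-time profiling with one counter per 4 bytes
 * of kernel text (shift 2), while "profile=schedule,1" profiles
 * schedule() call sites at 2-byte granularity. The shift trades buffer
 * size against resolution: a larger shift means fewer, coarser counters.
 */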

int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO);
	if (prof_buffer)
		return 0;

	prof_buffer = vmalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
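
/*
 * Note on the allocation chain above: kzalloc() is tried first for the
 * common case of a small, physically contiguous buffer; if buffer_bytes
 * exceeds what kmalloc can serve, alloc_pages_exact() asks the page
 * allocator directly; and vmalloc() is the last resort, since it can
 * satisfy large requests from non-contiguous pages at the cost of
 * virtually mapped (and TLB-heavier) memory.
 */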

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

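/*
 * Minimal usage sketch (callback and variable names are hypothetical):
 * a subsystem that wants task-exit events hangs a notifier_block on the
 * PROFILE_TASK_EXIT chain and receives the exiting task as the data
 * argument.
 *
 *	static int my_exit_cb(struct notifier_block *nb,
 *			      unsigned long val, void *data)
 *	{
 *		struct task_struct *task = data;
 *		// inspect task here; called via blocking chain
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_exit_cb };
 *
 *	profile_event_register(PROFILE_TASK_EXIT, &my_nb);
 */
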
int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}
EXPORT_SYMBOL_GPL(register_timer_hook);

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(unregister_timer_hook);


#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPIs all cpus to request that they
 * flip buffers, then flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of keeping a second hashtable is to avoid the cacheline
 * contention that would otherwise occur during the flushes of pending
 * profile hits required for the accuracy of reported profile hits,
 * contention which would resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot,
 * and each entry holds the number of pending hits to that profile
 * buffer slot on a cpu. When the hashtable overflows, all pending
 * hits are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable is emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
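/*
 * Concrete illustration of the amortization: if one hot text address
 * takes, say, 10,000 timer ticks between two reads of /proc/profile,
 * those hits coalesce into a single hashtable entry on each cpu and
 * are accounted with one atomic_add() per cpu at flip time, instead
 * of 10,000 contended atomic increments of the same global counter.
 */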
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	if (prof_on != type || !prof_buffer)
		return;
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
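	/*
	 * primary selects this pc's home group; secondary is the probe
	 * stride. Since ~(pc << 1) is always odd, the stride is an odd
	 * multiple of the group size and thus coprime with the
	 * power-of-two table size, so the do/while loop below visits
	 * every group before returning to primary.
	 */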
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int __cpuinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return NOTIFY_BAD;
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers() do { } while (0)
#define profile_discard_flip_buffers() do { } while (0)
#define profile_cpu_callback NULL

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	if (prof_on != type || !prof_buffer)
		return;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

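/*
 * Context note: profile_tick() is typically invoked from the
 * architecture's timer interrupt path. User-mode samples are skipped
 * deliberately, since only kernel text addresses map into prof_buffer;
 * profile_pc() is consulted only when the interrupted context was in
 * the kernel and the current cpu is set in prof_cpu_mask.
 */
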
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, data);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int prof_cpu_mask_write_proc(struct file *file,
			const char __user *buffer, unsigned long count, void *data)
{
	struct cpumask *mask = data;
	unsigned long full_count = count, err;
	cpumask_var_t new_value;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(mask, new_value);
		err = full_count;
	}
	free_cpumask_var(new_value);
	return err;
}

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	struct proc_dir_entry *entry;

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
	if (!entry)
		return;
	entry->data = prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;
}
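
/*
 * Example usage (shell, as root): restricting profiling to cpus 0-1
 * means writing the hex mask 0x3, e.g.
 *
 *	# echo 3 > /proc/irq/prof_cpu_mask
 *
 * cpumask_parse_user() accepts the same hex-mask format that
 * cpumask_scnprintf() produces on read.
 */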

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the readprofile(1) program is recommended to extract
 * meaningful information from this data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
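
/*
 * Layout sketch of what a reader sees (variable names illustrative
 * only): the first sizeof(unsigned int) bytes hold sample_step ==
 * 1 << prof_shift, followed by prof_len counters, one per sample_step
 * bytes of kernel text.
 *
 *	unsigned int step;
 *	int fd = open("/proc/profile", O_RDONLY);
 *	read(fd, &step, sizeof(step));	// sampling step, in bytes
 *	// subsequent reads return the hit counters themselves;
 *	// readprofile(1) decodes them against System.map
 */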

/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
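
/*
 * Usage sketch (shell): any write clears the accumulated counters, e.g.
 *
 *	# echo > /proc/profile
 *
 * while a write of exactly sizeof(int) bytes (e.g. from a program that
 * write(2)s a binary int) additionally passes that value to
 * setup_profiling_timer() as the new multiplier, on SMP architectures
 * that implement it.
 */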

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
};

#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		struct page *page;

		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables() ({ 0; })
#endif

int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	entry->size = (1+prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */