/*
 * linux/kernel/profile.c
 * Simple profiling. Manages a direct-mapped profile hit count buffer,
 * with configurable resolution, support for restricting the cpus on
 * which profiling is done, and switching between cpu time and
 * schedule() calls via kernel command line parameters passed at boot.
 *
 * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 * Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 * Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
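/*
 * Illustrative arithmetic (assuming 4KB pages): struct profile_hit is
 * 8 bytes, so each per-cpu hashtable page holds NR_PROFILE_HIT =
 * 4096/8 = 512 entries, organized as NR_PROFILE_GRP = 512/8 = 64
 * groups of PROFILE_GRPSZ = 8 slots each.
 */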

/* Oprofile timer tick hook */
static int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

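/*
 * Parse the "profile=" kernel command line parameter. Illustrative
 * forms, matching the string comparisons below (the trailing number is
 * prof_shift, so each profile bucket covers 1 << prof_shift bytes of
 * kernel text):
 *	profile=2		cpu-time profiling
 *	profile=schedule,4	profile schedule() call points
 *	profile=sleep		sleep profiling (needs CONFIG_SCHEDSTATS)
 *	profile=kvm		KVM profiling
 */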
int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);

int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

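	/*
	 * Fallback chain for the hit buffer: try kzalloc() first, then
	 * the page allocator, then vzalloc(); __GFP_NOWARN keeps the
	 * expected failures of the earlier, stricter allocators quiet.
	 */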
	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);
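
/*
 * Illustrative use (a minimal sketch, not part of this file): a caller
 * interested in task-exit events registers a notifier_block whose
 * callback receives the exiting task_struct as the data argument:
 *
 *	static int my_task_exit(struct notifier_block *nb,
 *				unsigned long val, void *data)
 *	{
 *		struct task_struct *task = data;
 *		pr_info("exiting: %s (%d)\n", task->comm, task->pid);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_task_exit,
 *	};
 *
 *	profile_event_register(PROFILE_TASK_EXIT, &my_nb);
 */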

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}
EXPORT_SYMBOL_GPL(register_timer_hook);

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(unregister_timer_hook);


#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of having a second hashtable is to avoid the cacheline
 * contention that would otherwise occur during flushes of the pending
 * profile hits required for the accuracy of reported profile hits,
 * contention which would resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 *
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
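/*
 * Worked example (illustrative, again assuming 4KB pages, i.e. 512
 * entries in 64 groups): a hit whose scaled pc is 0x1234 hashes to
 * group 0x1234 & 63 == 52, so probing starts at primary slot
 * 52*8 == 416; the secondary stride is (~(0x1234 << 1) & 63) << 3 ==
 * 184 slots. Because pc << 1 is even, the complemented stride is
 * always an odd number of groups and hence coprime with the 64-group
 * table, so do_profile_hits() below visits every group before wrapping
 * back to the primary slot and flushing the whole table.
 */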
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		/* Probe one group for a matching or free slot. */
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		/* Group full: step to the next group by the secondary stride. */
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int __cpuinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, prof_cpu_mask);
	seq_putc(m, '\n');
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

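/*
 * Illustrative use: /proc/irq/prof_cpu_mask takes a hex cpumask (as
 * parsed by cpumask_parse_user() above), so e.g.
 *	echo 3 > /proc/irq/prof_cpu_mask
 * restricts profiling ticks to CPUs 0 and 1.
 */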
void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("prof_cpu_mask", 0600, root_irq_dir, &prof_cpu_mask_proc_fops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
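/*
 * A sketch of the layout produced below: the first sizeof(unsigned int)
 * bytes hold sample_step (1 << prof_shift); the rest are the prof_len
 * atomic_t counters, one per sample_step bytes of kernel text. From
 * userspace this is typically decoded with readprofile(8), e.g.
 *	readprofile -m /boot/System.map
 */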
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
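/*
 * Illustrative use: any write resets the counters, so
 *	echo > /proc/profile
 * suffices from a shell; only a write of exactly sizeof(int) bytes is
 * additionally interpreted as a multiplier (see the CONFIG_SMP branch
 * below).
 */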
static ssize_t write_profile(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();	/* make prof_on == 0 visible before the nop IPI below */
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif

int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	entry->size = (1+prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */