perf_counter: frequency based adaptive irq_period, 32-bit fix
mt8127/android_kernel_alcatel_ttab.git: kernel/perf_counter.c
1 /*
2 * Performance counter core code
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/ptrace.h>
20 #include <linux/percpu.h>
21 #include <linux/vmstat.h>
22 #include <linux/hardirq.h>
23 #include <linux/rculist.h>
24 #include <linux/uaccess.h>
25 #include <linux/syscalls.h>
26 #include <linux/anon_inodes.h>
27 #include <linux/kernel_stat.h>
28 #include <linux/perf_counter.h>
29 #include <linux/dcache.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34 * Each CPU has a list of per CPU counters:
35 */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_tracking __read_mostly;
44 static atomic_t nr_munmap_tracking __read_mostly;
45 static atomic_t nr_comm_tracking __read_mostly;
46
47 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
48 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
49
50 /*
51 * Lock for (sysadmin-configurable) counter reservations:
52 */
53 static DEFINE_SPINLOCK(perf_resource_lock);
54
55 /*
56 * Architecture provided APIs - weak aliases:
57 */
58 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
59 {
60 return NULL;
61 }
62
63 void __weak hw_perf_disable(void) { barrier(); }
64 void __weak hw_perf_enable(void) { barrier(); }
65
66 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
67 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
68 struct perf_cpu_context *cpuctx,
69 struct perf_counter_context *ctx, int cpu)
70 {
71 return 0;
72 }
73
74 void __weak perf_counter_print_debug(void) { }
75
76 static DEFINE_PER_CPU(int, disable_count);
77
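/*
 * perf_disable()/perf_enable() nest: each CPU keeps a disable_count
 * below, and the PMU is only re-enabled once the outermost
 * perf_enable() brings that count back to zero.
 */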
78 void __perf_disable(void)
79 {
80 __get_cpu_var(disable_count)++;
81 }
82
83 bool __perf_enable(void)
84 {
85 return !--__get_cpu_var(disable_count);
86 }
87
88 void perf_disable(void)
89 {
90 __perf_disable();
91 hw_perf_disable();
92 }
93
94 void perf_enable(void)
95 {
96 if (__perf_enable())
97 hw_perf_enable();
98 }
99
100 static void
101 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
102 {
103 struct perf_counter *group_leader = counter->group_leader;
104
105 /*
106 * Depending on whether it is a standalone or sibling counter,
107 * add it straight to the context's counter list, or to the group
108 * leader's sibling list:
109 */
110 if (group_leader == counter)
111 list_add_tail(&counter->list_entry, &ctx->counter_list);
112 else {
113 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
114 group_leader->nr_siblings++;
115 }
116
117 list_add_rcu(&counter->event_entry, &ctx->event_list);
118 }
119
120 static void
121 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
122 {
123 struct perf_counter *sibling, *tmp;
124
125 list_del_init(&counter->list_entry);
126 list_del_rcu(&counter->event_entry);
127
128 if (counter->group_leader != counter)
129 counter->group_leader->nr_siblings--;
130
131 /*
132 * If this was a group counter with sibling counters then
133 * upgrade the siblings to singleton counters by adding them
134 * to the context list directly:
135 */
136 list_for_each_entry_safe(sibling, tmp,
137 &counter->sibling_list, list_entry) {
138
139 list_move_tail(&sibling->list_entry, &ctx->counter_list);
140 sibling->group_leader = sibling;
141 }
142 }
143
144 static void
145 counter_sched_out(struct perf_counter *counter,
146 struct perf_cpu_context *cpuctx,
147 struct perf_counter_context *ctx)
148 {
149 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
150 return;
151
152 counter->state = PERF_COUNTER_STATE_INACTIVE;
153 counter->tstamp_stopped = ctx->time;
154 counter->pmu->disable(counter);
155 counter->oncpu = -1;
156
157 if (!is_software_counter(counter))
158 cpuctx->active_oncpu--;
159 ctx->nr_active--;
160 if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
161 cpuctx->exclusive = 0;
162 }
163
164 static void
165 group_sched_out(struct perf_counter *group_counter,
166 struct perf_cpu_context *cpuctx,
167 struct perf_counter_context *ctx)
168 {
169 struct perf_counter *counter;
170
171 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
172 return;
173
174 counter_sched_out(group_counter, cpuctx, ctx);
175
176 /*
177 * Schedule out siblings (if any):
178 */
179 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
180 counter_sched_out(counter, cpuctx, ctx);
181
182 if (group_counter->hw_event.exclusive)
183 cpuctx->exclusive = 0;
184 }
185
186 /*
187 * Cross CPU call to remove a performance counter
188 *
189 * We disable the counter on the hardware level first. After that we
190 * remove it from the context list.
191 */
192 static void __perf_counter_remove_from_context(void *info)
193 {
194 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
195 struct perf_counter *counter = info;
196 struct perf_counter_context *ctx = counter->ctx;
197 unsigned long flags;
198
199 /*
200 * If this is a task context, we need to check whether it is
201 * the current task context of this cpu. If not it has been
202 * scheduled out before the smp call arrived.
203 */
204 if (ctx->task && cpuctx->task_ctx != ctx)
205 return;
206
207 spin_lock_irqsave(&ctx->lock, flags);
208
209 counter_sched_out(counter, cpuctx, ctx);
210
211 counter->task = NULL;
212 ctx->nr_counters--;
213
214 /*
215 * Protect the list operation against NMI by disabling the
216 * counters on a global level. NOP for non NMI based counters.
217 */
218 perf_disable();
219 list_del_counter(counter, ctx);
220 perf_enable();
221
222 if (!ctx->task) {
223 /*
224 * Allow more per task counters with respect to the
225 * reservation:
226 */
227 cpuctx->max_pertask =
228 min(perf_max_counters - ctx->nr_counters,
229 perf_max_counters - perf_reserved_percpu);
230 }
231
232 spin_unlock_irqrestore(&ctx->lock, flags);
233 }
234
235
236 /*
237 * Remove the counter from a task's (or a CPU's) list of counters.
238 *
239 * Must be called with counter->mutex and ctx->mutex held.
240 *
241 * CPU counters are removed with a smp call. For task counters we only
242 * call when the task is on a CPU.
243 */
244 static void perf_counter_remove_from_context(struct perf_counter *counter)
245 {
246 struct perf_counter_context *ctx = counter->ctx;
247 struct task_struct *task = ctx->task;
248
249 if (!task) {
250 /*
251 * Per cpu counters are removed via an smp call and
252 * the removal is always successful.
253 */
254 smp_call_function_single(counter->cpu,
255 __perf_counter_remove_from_context,
256 counter, 1);
257 return;
258 }
259
260 retry:
261 task_oncpu_function_call(task, __perf_counter_remove_from_context,
262 counter);
263
264 spin_lock_irq(&ctx->lock);
265 /*
266 * If the context is active we need to retry the smp call.
267 */
268 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
269 spin_unlock_irq(&ctx->lock);
270 goto retry;
271 }
272
273 /*
274 * The lock prevents this context from being scheduled in so we
275 * can remove the counter safely, if the call above did not
276 * succeed.
277 */
278 if (!list_empty(&counter->list_entry)) {
279 ctx->nr_counters--;
280 list_del_counter(counter, ctx);
281 counter->task = NULL;
282 }
283 spin_unlock_irq(&ctx->lock);
284 }
285
286 static inline u64 perf_clock(void)
287 {
288 return cpu_clock(smp_processor_id());
289 }
290
291 /*
292 * Update the record of the current time in a context.
293 */
294 static void update_context_time(struct perf_counter_context *ctx)
295 {
296 u64 now = perf_clock();
297
298 ctx->time += now - ctx->timestamp;
299 ctx->timestamp = now;
300 }
301
302 /*
303 * Update the total_time_enabled and total_time_running fields for a counter.
304 */
305 static void update_counter_times(struct perf_counter *counter)
306 {
307 struct perf_counter_context *ctx = counter->ctx;
308 u64 run_end;
309
310 if (counter->state < PERF_COUNTER_STATE_INACTIVE)
311 return;
312
313 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
314
315 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
316 run_end = counter->tstamp_stopped;
317 else
318 run_end = ctx->time;
319
320 counter->total_time_running = run_end - counter->tstamp_running;
321 }
322
323 /*
324 * Update total_time_enabled and total_time_running for all counters in a group.
325 */
326 static void update_group_times(struct perf_counter *leader)
327 {
328 struct perf_counter *counter;
329
330 update_counter_times(leader);
331 list_for_each_entry(counter, &leader->sibling_list, list_entry)
332 update_counter_times(counter);
333 }
334
335 /*
336 * Cross CPU call to disable a performance counter
337 */
338 static void __perf_counter_disable(void *info)
339 {
340 struct perf_counter *counter = info;
341 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
342 struct perf_counter_context *ctx = counter->ctx;
343 unsigned long flags;
344
345 /*
346 * If this is a per-task counter, need to check whether this
347 * counter's task is the current task on this cpu.
348 */
349 if (ctx->task && cpuctx->task_ctx != ctx)
350 return;
351
352 spin_lock_irqsave(&ctx->lock, flags);
353
354 /*
355 * If the counter is on, turn it off.
356 * If it is in error state, leave it in error state.
357 */
358 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
359 update_context_time(ctx);
360 update_counter_times(counter);
361 if (counter == counter->group_leader)
362 group_sched_out(counter, cpuctx, ctx);
363 else
364 counter_sched_out(counter, cpuctx, ctx);
365 counter->state = PERF_COUNTER_STATE_OFF;
366 }
367
368 spin_unlock_irqrestore(&ctx->lock, flags);
369 }
370
371 /*
372 * Disable a counter.
373 */
374 static void perf_counter_disable(struct perf_counter *counter)
375 {
376 struct perf_counter_context *ctx = counter->ctx;
377 struct task_struct *task = ctx->task;
378
379 if (!task) {
380 /*
381 * Disable the counter on the cpu that it's on
382 */
383 smp_call_function_single(counter->cpu, __perf_counter_disable,
384 counter, 1);
385 return;
386 }
387
388 retry:
389 task_oncpu_function_call(task, __perf_counter_disable, counter);
390
391 spin_lock_irq(&ctx->lock);
392 /*
393 * If the counter is still active, we need to retry the cross-call.
394 */
395 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
396 spin_unlock_irq(&ctx->lock);
397 goto retry;
398 }
399
400 /*
401 * Since we have the lock this context can't be scheduled
402 * in, so we can change the state safely.
403 */
404 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
405 update_counter_times(counter);
406 counter->state = PERF_COUNTER_STATE_OFF;
407 }
408
409 spin_unlock_irq(&ctx->lock);
410 }
411
412 static int
413 counter_sched_in(struct perf_counter *counter,
414 struct perf_cpu_context *cpuctx,
415 struct perf_counter_context *ctx,
416 int cpu)
417 {
418 if (counter->state <= PERF_COUNTER_STATE_OFF)
419 return 0;
420
421 counter->state = PERF_COUNTER_STATE_ACTIVE;
422 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
423 /*
424 * The new state must be visible before we turn it on in the hardware:
425 */
426 smp_wmb();
427
428 if (counter->pmu->enable(counter)) {
429 counter->state = PERF_COUNTER_STATE_INACTIVE;
430 counter->oncpu = -1;
431 return -EAGAIN;
432 }
433
434 counter->tstamp_running += ctx->time - counter->tstamp_stopped;
435
436 if (!is_software_counter(counter))
437 cpuctx->active_oncpu++;
438 ctx->nr_active++;
439
440 if (counter->hw_event.exclusive)
441 cpuctx->exclusive = 1;
442
443 return 0;
444 }
445
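/*
 * Schedule a counter group onto the PMU as one unit.  The architecture
 * gets first shot via hw_perf_group_sched_in(); when that returns 0 we
 * fall back to scheduling the leader and each sibling here, and roll
 * the partially scheduled group back if any member fails to go on.
 */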
446 static int
447 group_sched_in(struct perf_counter *group_counter,
448 struct perf_cpu_context *cpuctx,
449 struct perf_counter_context *ctx,
450 int cpu)
451 {
452 struct perf_counter *counter, *partial_group;
453 int ret;
454
455 if (group_counter->state == PERF_COUNTER_STATE_OFF)
456 return 0;
457
458 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
459 if (ret)
460 return ret < 0 ? ret : 0;
461
462 group_counter->prev_state = group_counter->state;
463 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
464 return -EAGAIN;
465
466 /*
467 * Schedule in siblings as one group (if any):
468 */
469 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
470 counter->prev_state = counter->state;
471 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
472 partial_group = counter;
473 goto group_error;
474 }
475 }
476
477 return 0;
478
479 group_error:
480 /*
481 * Groups can be scheduled in as one unit only, so undo any
482 * partial group before returning:
483 */
484 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
485 if (counter == partial_group)
486 break;
487 counter_sched_out(counter, cpuctx, ctx);
488 }
489 counter_sched_out(group_counter, cpuctx, ctx);
490
491 return -EAGAIN;
492 }
493
494 /*
495 * Return 1 for a group consisting entirely of software counters,
496 * 0 if the group contains any hardware counters.
497 */
498 static int is_software_only_group(struct perf_counter *leader)
499 {
500 struct perf_counter *counter;
501
502 if (!is_software_counter(leader))
503 return 0;
504
505 list_for_each_entry(counter, &leader->sibling_list, list_entry)
506 if (!is_software_counter(counter))
507 return 0;
508
509 return 1;
510 }
511
512 /*
513 * Work out whether we can put this counter group on the CPU now.
514 */
515 static int group_can_go_on(struct perf_counter *counter,
516 struct perf_cpu_context *cpuctx,
517 int can_add_hw)
518 {
519 /*
520 * Groups consisting entirely of software counters can always go on.
521 */
522 if (is_software_only_group(counter))
523 return 1;
524 /*
525 * If an exclusive group is already on, no other hardware
526 * counters can go on.
527 */
528 if (cpuctx->exclusive)
529 return 0;
530 /*
531 * If this group is exclusive and there are already
532 * counters on the CPU, it can't go on.
533 */
534 if (counter->hw_event.exclusive && cpuctx->active_oncpu)
535 return 0;
536 /*
537 * Otherwise, try to add it if all previous groups were able
538 * to go on.
539 */
540 return can_add_hw;
541 }
542
543 static void add_counter_to_ctx(struct perf_counter *counter,
544 struct perf_counter_context *ctx)
545 {
546 list_add_counter(counter, ctx);
547 ctx->nr_counters++;
548 counter->prev_state = PERF_COUNTER_STATE_OFF;
549 counter->tstamp_enabled = ctx->time;
550 counter->tstamp_running = ctx->time;
551 counter->tstamp_stopped = ctx->time;
552 }
553
554 /*
555 * Cross CPU call to install and enable a performance counter
556 */
557 static void __perf_install_in_context(void *info)
558 {
559 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
560 struct perf_counter *counter = info;
561 struct perf_counter_context *ctx = counter->ctx;
562 struct perf_counter *leader = counter->group_leader;
563 int cpu = smp_processor_id();
564 unsigned long flags;
565 int err;
566
567 /*
568 * If this is a task context, we need to check whether it is
569 * the current task context of this cpu. If not it has been
570 * scheduled out before the smp call arrived.
571 */
572 if (ctx->task && cpuctx->task_ctx != ctx)
573 return;
574
575 spin_lock_irqsave(&ctx->lock, flags);
576 update_context_time(ctx);
577
578 /*
579 * Protect the list operation against NMI by disabling the
580 * counters on a global level. NOP for non NMI based counters.
581 */
582 perf_disable();
583
584 add_counter_to_ctx(counter, ctx);
585
586 /*
587 * Don't put the counter on if it is disabled or if
588 * it is in a group and the group isn't on.
589 */
590 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
591 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
592 goto unlock;
593
594 /*
595 * An exclusive counter can't go on if there are already active
596 * hardware counters, and no hardware counter can go on if there
597 * is already an exclusive counter on.
598 */
599 if (!group_can_go_on(counter, cpuctx, 1))
600 err = -EEXIST;
601 else
602 err = counter_sched_in(counter, cpuctx, ctx, cpu);
603
604 if (err) {
605 /*
606 * This counter couldn't go on. If it is in a group
607 * then we have to pull the whole group off.
608 * If the counter group is pinned then put it in error state.
609 */
610 if (leader != counter)
611 group_sched_out(leader, cpuctx, ctx);
612 if (leader->hw_event.pinned) {
613 update_group_times(leader);
614 leader->state = PERF_COUNTER_STATE_ERROR;
615 }
616 }
617
618 if (!err && !ctx->task && cpuctx->max_pertask)
619 cpuctx->max_pertask--;
620
621 unlock:
622 perf_enable();
623
624 spin_unlock_irqrestore(&ctx->lock, flags);
625 }
626
627 /*
628 * Attach a performance counter to a context
629 *
630 * First we add the counter to the list with the hardware enable bit
631 * in counter->hw_config cleared.
632 *
633 * If the counter is attached to a task which is on a CPU we use a smp
634 * call to enable it in the task context. The task might have been
635 * scheduled away, but we check this in the smp call again.
636 *
637 * Must be called with ctx->mutex held.
638 */
639 static void
640 perf_install_in_context(struct perf_counter_context *ctx,
641 struct perf_counter *counter,
642 int cpu)
643 {
644 struct task_struct *task = ctx->task;
645
646 if (!task) {
647 /*
648 * Per cpu counters are installed via an smp call and
649 * the install is always successful.
650 */
651 smp_call_function_single(cpu, __perf_install_in_context,
652 counter, 1);
653 return;
654 }
655
656 counter->task = task;
657 retry:
658 task_oncpu_function_call(task, __perf_install_in_context,
659 counter);
660
661 spin_lock_irq(&ctx->lock);
662 /*
663 * If the context is active and the counter was not added, we need
664 * to retry the smp call.
664 */
665 if (ctx->is_active && list_empty(&counter->list_entry)) {
666 spin_unlock_irq(&ctx->lock);
667 goto retry;
668 }
669
670 /*
671 * The lock prevents this context from being scheduled in so we
672 * can add the counter safely, if the call above did not
673 * succeed.
674 */
675 if (list_empty(&counter->list_entry))
676 add_counter_to_ctx(counter, ctx);
677 spin_unlock_irq(&ctx->lock);
678 }
679
680 /*
681 * Cross CPU call to enable a performance counter
682 */
683 static void __perf_counter_enable(void *info)
684 {
685 struct perf_counter *counter = info;
686 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
687 struct perf_counter_context *ctx = counter->ctx;
688 struct perf_counter *leader = counter->group_leader;
689 unsigned long flags;
690 int err;
691
692 /*
693 * If this is a per-task counter, need to check whether this
694 * counter's task is the current task on this cpu.
695 */
696 if (ctx->task && cpuctx->task_ctx != ctx)
697 return;
698
699 spin_lock_irqsave(&ctx->lock, flags);
700 update_context_time(ctx);
701
702 counter->prev_state = counter->state;
703 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
704 goto unlock;
705 counter->state = PERF_COUNTER_STATE_INACTIVE;
706 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
707
708 /*
709 * If the counter is in a group and isn't the group leader,
710 * then don't put it on unless the group is on.
711 */
712 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
713 goto unlock;
714
715 if (!group_can_go_on(counter, cpuctx, 1)) {
716 err = -EEXIST;
717 } else {
718 perf_disable();
719 if (counter == leader)
720 err = group_sched_in(counter, cpuctx, ctx,
721 smp_processor_id());
722 else
723 err = counter_sched_in(counter, cpuctx, ctx,
724 smp_processor_id());
725 perf_enable();
726 }
727
728 if (err) {
729 /*
730 * If this counter can't go on and it's part of a
731 * group, then the whole group has to come off.
732 */
733 if (leader != counter)
734 group_sched_out(leader, cpuctx, ctx);
735 if (leader->hw_event.pinned) {
736 update_group_times(leader);
737 leader->state = PERF_COUNTER_STATE_ERROR;
738 }
739 }
740
741 unlock:
742 spin_unlock_irqrestore(&ctx->lock, flags);
743 }
744
745 /*
746 * Enable a counter.
747 */
748 static void perf_counter_enable(struct perf_counter *counter)
749 {
750 struct perf_counter_context *ctx = counter->ctx;
751 struct task_struct *task = ctx->task;
752
753 if (!task) {
754 /*
755 * Enable the counter on the cpu that it's on
756 */
757 smp_call_function_single(counter->cpu, __perf_counter_enable,
758 counter, 1);
759 return;
760 }
761
762 spin_lock_irq(&ctx->lock);
763 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
764 goto out;
765
766 /*
767 * If the counter is in error state, clear that first.
768 * That way, if we see the counter in error state below, we
769 * know that it has gone back into error state, as distinct
770 * from the task having been scheduled away before the
771 * cross-call arrived.
772 */
773 if (counter->state == PERF_COUNTER_STATE_ERROR)
774 counter->state = PERF_COUNTER_STATE_OFF;
775
776 retry:
777 spin_unlock_irq(&ctx->lock);
778 task_oncpu_function_call(task, __perf_counter_enable, counter);
779
780 spin_lock_irq(&ctx->lock);
781
782 /*
783 * If the context is active and the counter is still off,
784 * we need to retry the cross-call.
785 */
786 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
787 goto retry;
788
789 /*
790 * Since we have the lock this context can't be scheduled
791 * in, so we can change the state safely.
792 */
793 if (counter->state == PERF_COUNTER_STATE_OFF) {
794 counter->state = PERF_COUNTER_STATE_INACTIVE;
795 counter->tstamp_enabled =
796 ctx->time - counter->total_time_enabled;
797 }
798 out:
799 spin_unlock_irq(&ctx->lock);
800 }
801
802 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
803 {
804 /*
805 * not supported on inherited counters
806 */
807 if (counter->hw_event.inherit)
808 return -EINVAL;
809
810 atomic_add(refresh, &counter->event_limit);
811 perf_counter_enable(counter);
812
813 return 0;
814 }
815
816 void __perf_counter_sched_out(struct perf_counter_context *ctx,
817 struct perf_cpu_context *cpuctx)
818 {
819 struct perf_counter *counter;
820
821 spin_lock(&ctx->lock);
822 ctx->is_active = 0;
823 if (likely(!ctx->nr_counters))
824 goto out;
825 update_context_time(ctx);
826
827 perf_disable();
828 if (ctx->nr_active) {
829 list_for_each_entry(counter, &ctx->counter_list, list_entry)
830 group_sched_out(counter, cpuctx, ctx);
831 }
832 perf_enable();
833 out:
834 spin_unlock(&ctx->lock);
835 }
836
837 /*
838 * Called from scheduler to remove the counters of the current task,
839 * with interrupts disabled.
840 *
841 * We stop each counter and update the counter value in counter->count.
842 *
843 * This does not protect us against NMI, but disable()
844 * sets the disabled bit in the control field of counter _before_
845 * accessing the counter control register. If a NMI hits, then it will
846 * not restart the counter.
847 */
848 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
849 {
850 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
851 struct perf_counter_context *ctx = &task->perf_counter_ctx;
852 struct pt_regs *regs;
853
854 if (likely(!cpuctx->task_ctx))
855 return;
856
857 update_context_time(ctx);
858
859 regs = task_pt_regs(task);
860 perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
861 __perf_counter_sched_out(ctx, cpuctx);
862
863 cpuctx->task_ctx = NULL;
864 }
865
866 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
867 {
868 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
869
870 __perf_counter_sched_out(ctx, cpuctx);
871 cpuctx->task_ctx = NULL;
872 }
873
874 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
875 {
876 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
877 }
878
879 static void
880 __perf_counter_sched_in(struct perf_counter_context *ctx,
881 struct perf_cpu_context *cpuctx, int cpu)
882 {
883 struct perf_counter *counter;
884 int can_add_hw = 1;
885
886 spin_lock(&ctx->lock);
887 ctx->is_active = 1;
888 if (likely(!ctx->nr_counters))
889 goto out;
890
891 ctx->timestamp = perf_clock();
892
893 perf_disable();
894
895 /*
896 * First go through the list and put on any pinned groups
897 * in order to give them the best chance of going on.
898 */
899 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
900 if (counter->state <= PERF_COUNTER_STATE_OFF ||
901 !counter->hw_event.pinned)
902 continue;
903 if (counter->cpu != -1 && counter->cpu != cpu)
904 continue;
905
906 if (group_can_go_on(counter, cpuctx, 1))
907 group_sched_in(counter, cpuctx, ctx, cpu);
908
909 /*
910 * If this pinned group hasn't been scheduled,
911 * put it in error state.
912 */
913 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
914 update_group_times(counter);
915 counter->state = PERF_COUNTER_STATE_ERROR;
916 }
917 }
918
919 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
920 /*
921 * Ignore counters in OFF or ERROR state, and
922 * ignore pinned counters since we did them already.
923 */
924 if (counter->state <= PERF_COUNTER_STATE_OFF ||
925 counter->hw_event.pinned)
926 continue;
927
928 /*
929 * Listen to the 'cpu' scheduling filter constraint
930 * of counters:
931 */
932 if (counter->cpu != -1 && counter->cpu != cpu)
933 continue;
934
935 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
936 if (group_sched_in(counter, cpuctx, ctx, cpu))
937 can_add_hw = 0;
938 }
939 }
940 perf_enable();
941 out:
942 spin_unlock(&ctx->lock);
943 }
944
945 /*
946 * Called from scheduler to add the counters of the current task
947 * with interrupts disabled.
948 *
949 * We restore the counter value and then enable it.
950 *
951 * This does not protect us against NMI, but enable()
952 * sets the enabled bit in the control field of counter _before_
953 * accessing the counter control register. If a NMI hits, then it will
954 * keep the counter running.
955 */
956 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
957 {
958 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
959 struct perf_counter_context *ctx = &task->perf_counter_ctx;
960
961 __perf_counter_sched_in(ctx, cpuctx, cpu);
962 cpuctx->task_ctx = ctx;
963 }
964
965 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
966 {
967 struct perf_counter_context *ctx = &cpuctx->ctx;
968
969 __perf_counter_sched_in(ctx, cpuctx, cpu);
970 }
971
972 int perf_counter_task_disable(void)
973 {
974 struct task_struct *curr = current;
975 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
976 struct perf_counter *counter;
977 unsigned long flags;
978
979 if (likely(!ctx->nr_counters))
980 return 0;
981
982 local_irq_save(flags);
983
984 __perf_counter_task_sched_out(ctx);
985
986 spin_lock(&ctx->lock);
987
988 /*
989 * Disable all the counters:
990 */
991 perf_disable();
992
993 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
994 if (counter->state != PERF_COUNTER_STATE_ERROR) {
995 update_group_times(counter);
996 counter->state = PERF_COUNTER_STATE_OFF;
997 }
998 }
999
1000 perf_enable();
1001
1002 spin_unlock_irqrestore(&ctx->lock, flags);
1003
1004 return 0;
1005 }
1006
1007 int perf_counter_task_enable(void)
1008 {
1009 struct task_struct *curr = current;
1010 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
1011 struct perf_counter *counter;
1012 unsigned long flags;
1013 int cpu;
1014
1015 if (likely(!ctx->nr_counters))
1016 return 0;
1017
1018 local_irq_save(flags);
1019 cpu = smp_processor_id();
1020
1021 __perf_counter_task_sched_out(ctx);
1022
1023 spin_lock(&ctx->lock);
1024
1025 /*
1026 * Disable the PMU while we switch the counters back on:
1027 */
1028 perf_disable();
1029
1030 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1031 if (counter->state > PERF_COUNTER_STATE_OFF)
1032 continue;
1033 counter->state = PERF_COUNTER_STATE_INACTIVE;
1034 counter->tstamp_enabled =
1035 ctx->time - counter->total_time_enabled;
1036 counter->hw_event.disabled = 0;
1037 }
1038 perf_enable();
1039
1040 spin_unlock(&ctx->lock);
1041
1042 perf_counter_task_sched_in(curr, cpu);
1043
1044 local_irq_restore(flags);
1045
1046 return 0;
1047 }
1048
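/*
 * Frequency based sampling: called once per tick, this estimates the
 * rate a counter has been running at (HZ * interrupts * irq_period
 * events per second, since 'interrupts' were taken over the last tick)
 * and moves irq_period halfway towards the period that would yield the
 * requested irq_freq.  div64_u64() keeps the 64-bit division correct
 * on 32-bit machines (the "32-bit fix" in this change).
 *
 * Illustrative example (not from the original source), with HZ=1000:
 * a counter at irq_period=10000 that took 2 interrupts last tick ran
 * at 1000 * 2 * 10000 = 20,000,000 events/sec; for irq_freq=1000 the
 * ideal period is 20,000, so irq_period is nudged to 15,000.
 */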
1049 void perf_adjust_freq(struct perf_counter_context *ctx)
1050 {
1051 struct perf_counter *counter;
1052 u64 irq_period;
1053 u64 events, period;
1054 s64 delta;
1055
1056 spin_lock(&ctx->lock);
1057 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1058 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1059 continue;
1060
1061 if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
1062 continue;
1063
1064 events = HZ * counter->hw.interrupts * counter->hw.irq_period;
1065 period = div64_u64(events, counter->hw_event.irq_freq);
1066
1067 delta = (s64)(1 + period - counter->hw.irq_period);
1068 delta >>= 1;
1069
1070 irq_period = counter->hw.irq_period + delta;
1071
1072 if (!irq_period)
1073 irq_period = 1;
1074
1075 counter->hw.irq_period = irq_period;
1076 counter->hw.interrupts = 0;
1077 }
1078 spin_unlock(&ctx->lock);
1079 }
1080
1081 /*
1082 * Round-robin a context's counters:
1083 */
1084 static void rotate_ctx(struct perf_counter_context *ctx)
1085 {
1086 struct perf_counter *counter;
1087
1088 if (!ctx->nr_counters)
1089 return;
1090
1091 spin_lock(&ctx->lock);
1092 /*
1093 * Rotate the first entry last (works just fine for group counters too):
1094 */
1095 perf_disable();
1096 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1097 list_move_tail(&counter->list_entry, &ctx->counter_list);
1098 break;
1099 }
1100 perf_enable();
1101
1102 spin_unlock(&ctx->lock);
1103 }
1104
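/*
 * Scheduler tick hook: re-tune frequency based counters, then schedule
 * out and rotate both the per-CPU and the task context so counters
 * that cannot all be on the PMU at once get round-robined onto it.
 */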
1105 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1106 {
1107 struct perf_cpu_context *cpuctx;
1108 struct perf_counter_context *ctx;
1109
1110 if (!atomic_read(&nr_counters))
1111 return;
1112
1113 cpuctx = &per_cpu(perf_cpu_context, cpu);
1114 ctx = &curr->perf_counter_ctx;
1115
1116 perf_adjust_freq(&cpuctx->ctx);
1117 perf_adjust_freq(ctx);
1118
1119 perf_counter_cpu_sched_out(cpuctx);
1120 __perf_counter_task_sched_out(ctx);
1121
1122 rotate_ctx(&cpuctx->ctx);
1123 rotate_ctx(ctx);
1124
1125 perf_counter_cpu_sched_in(cpuctx, cpu);
1126 perf_counter_task_sched_in(curr, cpu);
1127 }
1128
1129 /*
1130 * Cross CPU call to read the hardware counter
1131 */
1132 static void __read(void *info)
1133 {
1134 struct perf_counter *counter = info;
1135 struct perf_counter_context *ctx = counter->ctx;
1136 unsigned long flags;
1137
1138 local_irq_save(flags);
1139 if (ctx->is_active)
1140 update_context_time(ctx);
1141 counter->pmu->read(counter);
1142 update_counter_times(counter);
1143 local_irq_restore(flags);
1144 }
1145
1146 static u64 perf_counter_read(struct perf_counter *counter)
1147 {
1148 /*
1149 * If counter is enabled and currently active on a CPU, update the
1150 * value in the counter structure:
1151 */
1152 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1153 smp_call_function_single(counter->oncpu,
1154 __read, counter, 1);
1155 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1156 update_counter_times(counter);
1157 }
1158
1159 return atomic64_read(&counter->count);
1160 }
1161
1162 static void put_context(struct perf_counter_context *ctx)
1163 {
1164 if (ctx->task)
1165 put_task_struct(ctx->task);
1166 }
1167
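/*
 * Find the context a new counter should attach to.  For cpu != -1 this
 * is the per-CPU context (privileged if sysctl_perf_counter_priv is
 * set, and the CPU must be online); otherwise it is the context of the
 * task identified by pid (or of current when pid == 0), subject to a
 * ptrace-style permission check.
 */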
1168 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1169 {
1170 struct perf_cpu_context *cpuctx;
1171 struct perf_counter_context *ctx;
1172 struct task_struct *task;
1173
1174 /*
1175 * If cpu is not a wildcard then this is a percpu counter:
1176 */
1177 if (cpu != -1) {
1178 /* Must be root to operate on a CPU counter: */
1179 if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
1180 return ERR_PTR(-EACCES);
1181
1182 if (cpu < 0 || cpu > num_possible_cpus())
1183 return ERR_PTR(-EINVAL);
1184
1185 /*
1186 * We could be clever and allow attaching a counter to an
1187 * offline CPU and activate it when the CPU comes up, but
1188 * that's for later.
1189 */
1190 if (!cpu_isset(cpu, cpu_online_map))
1191 return ERR_PTR(-ENODEV);
1192
1193 cpuctx = &per_cpu(perf_cpu_context, cpu);
1194 ctx = &cpuctx->ctx;
1195
1196 return ctx;
1197 }
1198
1199 rcu_read_lock();
1200 if (!pid)
1201 task = current;
1202 else
1203 task = find_task_by_vpid(pid);
1204 if (task)
1205 get_task_struct(task);
1206 rcu_read_unlock();
1207
1208 if (!task)
1209 return ERR_PTR(-ESRCH);
1210
1211 ctx = &task->perf_counter_ctx;
1212 ctx->task = task;
1213
1214 /* Reuse ptrace permission checks for now. */
1215 if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1216 put_context(ctx);
1217 return ERR_PTR(-EACCES);
1218 }
1219
1220 return ctx;
1221 }
1222
1223 static void free_counter_rcu(struct rcu_head *head)
1224 {
1225 struct perf_counter *counter;
1226
1227 counter = container_of(head, struct perf_counter, rcu_head);
1228 kfree(counter);
1229 }
1230
1231 static void perf_pending_sync(struct perf_counter *counter);
1232
1233 static void free_counter(struct perf_counter *counter)
1234 {
1235 perf_pending_sync(counter);
1236
1237 atomic_dec(&nr_counters);
1238 if (counter->hw_event.mmap)
1239 atomic_dec(&nr_mmap_tracking);
1240 if (counter->hw_event.munmap)
1241 atomic_dec(&nr_munmap_tracking);
1242 if (counter->hw_event.comm)
1243 atomic_dec(&nr_comm_tracking);
1244
1245 if (counter->destroy)
1246 counter->destroy(counter);
1247
1248 call_rcu(&counter->rcu_head, free_counter_rcu);
1249 }
1250
1251 /*
1252 * Called when the last reference to the file is gone.
1253 */
1254 static int perf_release(struct inode *inode, struct file *file)
1255 {
1256 struct perf_counter *counter = file->private_data;
1257 struct perf_counter_context *ctx = counter->ctx;
1258
1259 file->private_data = NULL;
1260
1261 mutex_lock(&ctx->mutex);
1262 mutex_lock(&counter->mutex);
1263
1264 perf_counter_remove_from_context(counter);
1265
1266 mutex_unlock(&counter->mutex);
1267 mutex_unlock(&ctx->mutex);
1268
1269 free_counter(counter);
1270 put_context(ctx);
1271
1272 return 0;
1273 }
1274
1275 /*
1276 * Read the performance counter - simple non blocking version for now
1277 */
1278 static ssize_t
1279 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1280 {
1281 u64 values[3];
1282 int n;
1283
1284 /*
1285 * Return end-of-file for a read on a counter that is in
1286 * error state (i.e. because it was pinned but it couldn't be
1287 * scheduled on to the CPU at some point).
1288 */
1289 if (counter->state == PERF_COUNTER_STATE_ERROR)
1290 return 0;
1291
1292 mutex_lock(&counter->mutex);
1293 values[0] = perf_counter_read(counter);
1294 n = 1;
1295 if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1296 values[n++] = counter->total_time_enabled +
1297 atomic64_read(&counter->child_total_time_enabled);
1298 if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1299 values[n++] = counter->total_time_running +
1300 atomic64_read(&counter->child_total_time_running);
1301 mutex_unlock(&counter->mutex);
1302
1303 if (count < n * sizeof(u64))
1304 return -EINVAL;
1305 count = n * sizeof(u64);
1306
1307 if (copy_to_user(buf, values, count))
1308 return -EFAULT;
1309
1310 return count;
1311 }
1312
1313 static ssize_t
1314 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1315 {
1316 struct perf_counter *counter = file->private_data;
1317
1318 return perf_read_hw(counter, buf, count);
1319 }
1320
1321 static unsigned int perf_poll(struct file *file, poll_table *wait)
1322 {
1323 struct perf_counter *counter = file->private_data;
1324 struct perf_mmap_data *data;
1325 unsigned int events = POLLHUP;
1326
1327 rcu_read_lock();
1328 data = rcu_dereference(counter->data);
1329 if (data)
1330 events = atomic_xchg(&data->poll, 0);
1331 rcu_read_unlock();
1332
1333 poll_wait(file, &counter->waitq, wait);
1334
1335 return events;
1336 }
1337
1338 static void perf_counter_reset(struct perf_counter *counter)
1339 {
1340 (void)perf_counter_read(counter);
1341 atomic64_set(&counter->count, 0);
1342 perf_counter_update_userpage(counter);
1343 }
1344
1345 static void perf_counter_for_each_sibling(struct perf_counter *counter,
1346 void (*func)(struct perf_counter *))
1347 {
1348 struct perf_counter_context *ctx = counter->ctx;
1349 struct perf_counter *sibling;
1350
1351 spin_lock_irq(&ctx->lock);
1352 counter = counter->group_leader;
1353
1354 func(counter);
1355 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1356 func(sibling);
1357 spin_unlock_irq(&ctx->lock);
1358 }
1359
1360 static void perf_counter_for_each_child(struct perf_counter *counter,
1361 void (*func)(struct perf_counter *))
1362 {
1363 struct perf_counter *child;
1364
1365 mutex_lock(&counter->mutex);
1366 func(counter);
1367 list_for_each_entry(child, &counter->child_list, child_list)
1368 func(child);
1369 mutex_unlock(&counter->mutex);
1370 }
1371
1372 static void perf_counter_for_each(struct perf_counter *counter,
1373 void (*func)(struct perf_counter *))
1374 {
1375 struct perf_counter *child;
1376
1377 mutex_lock(&counter->mutex);
1378 perf_counter_for_each_sibling(counter, func);
1379 list_for_each_entry(child, &counter->child_list, child_list)
1380 perf_counter_for_each_sibling(child, func);
1381 mutex_unlock(&counter->mutex);
1382 }
1383
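/*
 * ioctl interface: ENABLE, DISABLE and RESET apply to the counter and
 * its inherited children, or to the whole sibling group when
 * PERF_IOC_FLAG_GROUP is set; REFRESH adds to the counter's
 * event_limit and re-enables it.
 */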
1384 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1385 {
1386 struct perf_counter *counter = file->private_data;
1387 void (*func)(struct perf_counter *);
1388 u32 flags = arg;
1389
1390 switch (cmd) {
1391 case PERF_COUNTER_IOC_ENABLE:
1392 func = perf_counter_enable;
1393 break;
1394 case PERF_COUNTER_IOC_DISABLE:
1395 func = perf_counter_disable;
1396 break;
1397 case PERF_COUNTER_IOC_RESET:
1398 func = perf_counter_reset;
1399 break;
1400
1401 case PERF_COUNTER_IOC_REFRESH:
1402 return perf_counter_refresh(counter, arg);
1403 default:
1404 return -ENOTTY;
1405 }
1406
1407 if (flags & PERF_IOC_FLAG_GROUP)
1408 perf_counter_for_each(counter, func);
1409 else
1410 perf_counter_for_each_child(counter, func);
1411
1412 return 0;
1413 }
1414
1415 /*
1416 * Callers need to ensure there can be no nesting of this function, otherwise
1417 * the seqlock logic goes bad. We can not serialize this because the arch
1418 * code calls this from NMI context.
1419 */
1420 void perf_counter_update_userpage(struct perf_counter *counter)
1421 {
1422 struct perf_mmap_data *data;
1423 struct perf_counter_mmap_page *userpg;
1424
1425 rcu_read_lock();
1426 data = rcu_dereference(counter->data);
1427 if (!data)
1428 goto unlock;
1429
1430 userpg = data->user_page;
1431
1432 /*
1433 * Disable preemption so as to not let the corresponding user-space
1434 * spin too long if we get preempted.
1435 */
1436 preempt_disable();
1437 ++userpg->lock;
1438 barrier();
1439 userpg->index = counter->hw.idx;
1440 userpg->offset = atomic64_read(&counter->count);
1441 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1442 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1443
1444 barrier();
1445 ++userpg->lock;
1446 preempt_enable();
1447 unlock:
1448 rcu_read_unlock();
1449 }
1450
1451 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1452 {
1453 struct perf_counter *counter = vma->vm_file->private_data;
1454 struct perf_mmap_data *data;
1455 int ret = VM_FAULT_SIGBUS;
1456
1457 rcu_read_lock();
1458 data = rcu_dereference(counter->data);
1459 if (!data)
1460 goto unlock;
1461
1462 if (vmf->pgoff == 0) {
1463 vmf->page = virt_to_page(data->user_page);
1464 } else {
1465 int nr = vmf->pgoff - 1;
1466
1467 if ((unsigned)nr >= data->nr_pages)
1468 goto unlock;
1469
1470 vmf->page = virt_to_page(data->data_pages[nr]);
1471 }
1472 get_page(vmf->page);
1473 ret = 0;
1474 unlock:
1475 rcu_read_unlock();
1476
1477 return ret;
1478 }
1479
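/*
 * Allocate the mmap()ed buffer: one zeroed control page (the
 * perf_counter_mmap_page seen by user space) plus nr_pages zeroed data
 * pages, published to readers with rcu_assign_pointer().
 */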
1480 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1481 {
1482 struct perf_mmap_data *data;
1483 unsigned long size;
1484 int i;
1485
1486 WARN_ON(atomic_read(&counter->mmap_count));
1487
1488 size = sizeof(struct perf_mmap_data);
1489 size += nr_pages * sizeof(void *);
1490
1491 data = kzalloc(size, GFP_KERNEL);
1492 if (!data)
1493 goto fail;
1494
1495 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1496 if (!data->user_page)
1497 goto fail_user_page;
1498
1499 for (i = 0; i < nr_pages; i++) {
1500 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1501 if (!data->data_pages[i])
1502 goto fail_data_pages;
1503 }
1504
1505 data->nr_pages = nr_pages;
1506 atomic_set(&data->lock, -1);
1507
1508 rcu_assign_pointer(counter->data, data);
1509
1510 return 0;
1511
1512 fail_data_pages:
1513 for (i--; i >= 0; i--)
1514 free_page((unsigned long)data->data_pages[i]);
1515
1516 free_page((unsigned long)data->user_page);
1517
1518 fail_user_page:
1519 kfree(data);
1520
1521 fail:
1522 return -ENOMEM;
1523 }
1524
1525 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1526 {
1527 struct perf_mmap_data *data = container_of(rcu_head,
1528 struct perf_mmap_data, rcu_head);
1529 int i;
1530
1531 free_page((unsigned long)data->user_page);
1532 for (i = 0; i < data->nr_pages; i++)
1533 free_page((unsigned long)data->data_pages[i]);
1534 kfree(data);
1535 }
1536
1537 static void perf_mmap_data_free(struct perf_counter *counter)
1538 {
1539 struct perf_mmap_data *data = counter->data;
1540
1541 WARN_ON(atomic_read(&counter->mmap_count));
1542
1543 rcu_assign_pointer(counter->data, NULL);
1544 call_rcu(&data->rcu_head, __perf_mmap_data_free);
1545 }
1546
1547 static void perf_mmap_open(struct vm_area_struct *vma)
1548 {
1549 struct perf_counter *counter = vma->vm_file->private_data;
1550
1551 atomic_inc(&counter->mmap_count);
1552 }
1553
1554 static void perf_mmap_close(struct vm_area_struct *vma)
1555 {
1556 struct perf_counter *counter = vma->vm_file->private_data;
1557
1558 if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1559 &counter->mmap_mutex)) {
1560 struct user_struct *user = current_user();
1561
1562 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1563 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1564 perf_mmap_data_free(counter);
1565 mutex_unlock(&counter->mmap_mutex);
1566 }
1567 }
1568
1569 static struct vm_operations_struct perf_mmap_vmops = {
1570 .open = perf_mmap_open,
1571 .close = perf_mmap_close,
1572 .fault = perf_mmap_fault,
1573 };
1574
1575 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1576 {
1577 struct perf_counter *counter = file->private_data;
1578 struct user_struct *user = current_user();
1579 unsigned long vma_size;
1580 unsigned long nr_pages;
1581 unsigned long user_locked, user_lock_limit;
1582 unsigned long locked, lock_limit;
1583 long user_extra, extra;
1584 int ret = 0;
1585
1586 if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1587 return -EINVAL;
1588
1589 vma_size = vma->vm_end - vma->vm_start;
1590 nr_pages = (vma_size / PAGE_SIZE) - 1;
1591
1592 /*
1593 * If we have data pages ensure they're a power-of-two number, so we
1594 * can do bitmasks instead of modulo.
1595 */
1596 if (nr_pages != 0 && !is_power_of_2(nr_pages))
1597 return -EINVAL;
1598
1599 if (vma_size != PAGE_SIZE * (1 + nr_pages))
1600 return -EINVAL;
1601
1602 if (vma->vm_pgoff != 0)
1603 return -EINVAL;
1604
1605 mutex_lock(&counter->mmap_mutex);
1606 if (atomic_inc_not_zero(&counter->mmap_count)) {
1607 if (nr_pages != counter->data->nr_pages)
1608 ret = -EINVAL;
1609 goto unlock;
1610 }
1611
1612 user_extra = nr_pages + 1;
1613 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1614 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
1615
1616 extra = 0;
1617 if (user_locked > user_lock_limit)
1618 extra = user_locked - user_lock_limit;
1619
1620 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1621 lock_limit >>= PAGE_SHIFT;
1622 locked = vma->vm_mm->locked_vm + extra;
1623
1624 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1625 ret = -EPERM;
1626 goto unlock;
1627 }
1628
1629 WARN_ON(counter->data);
1630 ret = perf_mmap_data_alloc(counter, nr_pages);
1631 if (ret)
1632 goto unlock;
1633
1634 atomic_set(&counter->mmap_count, 1);
1635 atomic_long_add(user_extra, &user->locked_vm);
1636 vma->vm_mm->locked_vm += extra;
1637 counter->data->nr_locked = extra;
1638 unlock:
1639 mutex_unlock(&counter->mmap_mutex);
1640
1641 vma->vm_flags &= ~VM_MAYWRITE;
1642 vma->vm_flags |= VM_RESERVED;
1643 vma->vm_ops = &perf_mmap_vmops;
1644
1645 return ret;
1646 }
1647
1648 static int perf_fasync(int fd, struct file *filp, int on)
1649 {
1650 struct perf_counter *counter = filp->private_data;
1651 struct inode *inode = filp->f_path.dentry->d_inode;
1652 int retval;
1653
1654 mutex_lock(&inode->i_mutex);
1655 retval = fasync_helper(fd, filp, on, &counter->fasync);
1656 mutex_unlock(&inode->i_mutex);
1657
1658 if (retval < 0)
1659 return retval;
1660
1661 return 0;
1662 }
1663
1664 static const struct file_operations perf_fops = {
1665 .release = perf_release,
1666 .read = perf_read,
1667 .poll = perf_poll,
1668 .unlocked_ioctl = perf_ioctl,
1669 .compat_ioctl = perf_ioctl,
1670 .mmap = perf_mmap,
1671 .fasync = perf_fasync,
1672 };
1673
1674 /*
1675 * Perf counter wakeup
1676 *
1677 * If there's data, ensure we set the poll() state and publish everything
1678 * to user-space before waking everybody up.
1679 */
1680
1681 void perf_counter_wakeup(struct perf_counter *counter)
1682 {
1683 wake_up_all(&counter->waitq);
1684
1685 if (counter->pending_kill) {
1686 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
1687 counter->pending_kill = 0;
1688 }
1689 }
1690
1691 /*
1692 * Pending wakeups
1693 *
1694 * Handle the case where we need to wake up from NMI (or rq->lock) context.
1695 *
1696 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1697 * single linked list and use cmpxchg() to add entries lockless.
1698 */
1699
1700 static void perf_pending_counter(struct perf_pending_entry *entry)
1701 {
1702 struct perf_counter *counter = container_of(entry,
1703 struct perf_counter, pending);
1704
1705 if (counter->pending_disable) {
1706 counter->pending_disable = 0;
1707 perf_counter_disable(counter);
1708 }
1709
1710 if (counter->pending_wakeup) {
1711 counter->pending_wakeup = 0;
1712 perf_counter_wakeup(counter);
1713 }
1714 }
1715
1716 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
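/*
 * PENDING_TAIL terminates the per-CPU pending list: a NULL ->next means
 * "not queued" (that is what the cmpxchg() in perf_pending_queue()
 * tests), so the end of the list needs a distinct non-NULL marker.
 */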
1717
1718 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
1719 PENDING_TAIL,
1720 };
1721
1722 static void perf_pending_queue(struct perf_pending_entry *entry,
1723 void (*func)(struct perf_pending_entry *))
1724 {
1725 struct perf_pending_entry **head;
1726
1727 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
1728 return;
1729
1730 entry->func = func;
1731
1732 head = &get_cpu_var(perf_pending_head);
1733
1734 do {
1735 entry->next = *head;
1736 } while (cmpxchg(head, entry->next, entry) != entry->next);
1737
1738 set_perf_counter_pending();
1739
1740 put_cpu_var(perf_pending_head);
1741 }
1742
1743 static int __perf_pending_run(void)
1744 {
1745 struct perf_pending_entry *list;
1746 int nr = 0;
1747
1748 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
1749 while (list != PENDING_TAIL) {
1750 void (*func)(struct perf_pending_entry *);
1751 struct perf_pending_entry *entry = list;
1752
1753 list = list->next;
1754
1755 func = entry->func;
1756 entry->next = NULL;
1757 /*
1758 * Ensure we observe the unqueue before we issue the wakeup,
1759 * so that we won't be waiting forever.
1760 * -- see perf_not_pending().
1761 */
1762 smp_wmb();
1763
1764 func(entry);
1765 nr++;
1766 }
1767
1768 return nr;
1769 }
1770
1771 static inline int perf_not_pending(struct perf_counter *counter)
1772 {
1773 /*
1774 * If we flush on whatever cpu we run, there is a chance we don't
1775 * need to wait.
1776 */
1777 get_cpu();
1778 __perf_pending_run();
1779 put_cpu();
1780
1781 /*
1782 * Ensure we see the proper queue state before going to sleep
1783 * so that we do not miss the wakeup. -- see __perf_pending_run()
1784 */
1785 smp_rmb();
1786 return counter->pending.next == NULL;
1787 }
1788
1789 static void perf_pending_sync(struct perf_counter *counter)
1790 {
1791 wait_event(counter->waitq, perf_not_pending(counter));
1792 }
1793
1794 void perf_counter_do_pending(void)
1795 {
1796 __perf_pending_run();
1797 }
1798
1799 /*
1800 * Callchain support -- arch specific
1801 */
1802
1803 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1804 {
1805 return NULL;
1806 }
1807
1808 /*
1809 * Output
1810 */
1811
1812 struct perf_output_handle {
1813 struct perf_counter *counter;
1814 struct perf_mmap_data *data;
1815 unsigned int offset;
1816 unsigned int head;
1817 int nmi;
1818 int overflow;
1819 int locked;
1820 unsigned long flags;
1821 };
1822
1823 static void perf_output_wakeup(struct perf_output_handle *handle)
1824 {
1825 atomic_set(&handle->data->poll, POLLIN);
1826
1827 if (handle->nmi) {
1828 handle->counter->pending_wakeup = 1;
1829 perf_pending_queue(&handle->counter->pending,
1830 perf_pending_counter);
1831 } else
1832 perf_counter_wakeup(handle->counter);
1833 }
1834
1835 /*
1836 * Curious locking construct.
1837 *
1838 * We need to ensure a later event doesn't publish a head when a former
1839 * event isn't done writing. However since we need to deal with NMIs we
1840 * cannot fully serialize things.
1841 *
1842 * What we do is serialize between CPUs so we only have to deal with NMI
1843 * nesting on a single CPU.
1844 *
1845 * We only publish the head (and generate a wakeup) when the outer-most
1846 * event completes.
1847 */
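/*
 * Concretely: data->lock holds the id of the CPU that owns the buffer,
 * or -1 when it is free.  An NMI that hits while its own CPU holds the
 * lock sees its CPU id in data->lock and skips taking it again.
 */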
1848 static void perf_output_lock(struct perf_output_handle *handle)
1849 {
1850 struct perf_mmap_data *data = handle->data;
1851 int cpu;
1852
1853 handle->locked = 0;
1854
1855 local_irq_save(handle->flags);
1856 cpu = smp_processor_id();
1857
1858 if (in_nmi() && atomic_read(&data->lock) == cpu)
1859 return;
1860
1861 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
1862 cpu_relax();
1863
1864 handle->locked = 1;
1865 }
1866
1867 static void perf_output_unlock(struct perf_output_handle *handle)
1868 {
1869 struct perf_mmap_data *data = handle->data;
1870 int head, cpu;
1871
1872 data->done_head = data->head;
1873
1874 if (!handle->locked)
1875 goto out;
1876
1877 again:
1878 /*
1879 * The xchg implies a full barrier that ensures all writes are done
1880 * before we publish the new head, matched by a rmb() in userspace when
1881 * reading this position.
1882 */
1883 while ((head = atomic_xchg(&data->done_head, 0)))
1884 data->user_page->data_head = head;
1885
1886 /*
1887 * NMI can happen here, which means we can miss a done_head update.
1888 */
1889
1890 cpu = atomic_xchg(&data->lock, -1);
1891 WARN_ON_ONCE(cpu != smp_processor_id());
1892
1893 /*
1894 * Therefore we have to validate we did not indeed do so.
1895 */
1896 if (unlikely(atomic_read(&data->done_head))) {
1897 /*
1898 * Since we had it locked, we can lock it again.
1899 */
1900 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
1901 cpu_relax();
1902
1903 goto again;
1904 }
1905
1906 if (atomic_xchg(&data->wakeup, 0))
1907 perf_output_wakeup(handle);
1908 out:
1909 local_irq_restore(handle->flags);
1910 }
1911
1912 static int perf_output_begin(struct perf_output_handle *handle,
1913 struct perf_counter *counter, unsigned int size,
1914 int nmi, int overflow)
1915 {
1916 struct perf_mmap_data *data;
1917 unsigned int offset, head;
1918
1919 /*
1920 * For inherited counters we send all the output towards the parent.
1921 */
1922 if (counter->parent)
1923 counter = counter->parent;
1924
1925 rcu_read_lock();
1926 data = rcu_dereference(counter->data);
1927 if (!data)
1928 goto out;
1929
1930 handle->data = data;
1931 handle->counter = counter;
1932 handle->nmi = nmi;
1933 handle->overflow = overflow;
1934
1935 if (!data->nr_pages)
1936 goto fail;
1937
1938 perf_output_lock(handle);
1939
1940 do {
1941 offset = head = atomic_read(&data->head);
1942 head += size;
1943 } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1944
1945 handle->offset = offset;
1946 handle->head = head;
1947
1948 if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
1949 atomic_set(&data->wakeup, 1);
1950
1951 return 0;
1952
1953 fail:
1954 perf_output_wakeup(handle);
1955 out:
1956 rcu_read_unlock();
1957
1958 return -ENOSPC;
1959 }
1960
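/*
 * Copy bytes into the ring buffer at handle->offset, wrapping across
 * the power-of-two array of data pages.  The space was reserved in
 * perf_output_begin(); the WARN_ON_ONCE() below checks that we stayed
 * within that reservation.
 */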
1961 static void perf_output_copy(struct perf_output_handle *handle,
1962 void *buf, unsigned int len)
1963 {
1964 unsigned int pages_mask;
1965 unsigned int offset;
1966 unsigned int size;
1967 void **pages;
1968
1969 offset = handle->offset;
1970 pages_mask = handle->data->nr_pages - 1;
1971 pages = handle->data->data_pages;
1972
1973 do {
1974 unsigned int page_offset;
1975 int nr;
1976
1977 nr = (offset >> PAGE_SHIFT) & pages_mask;
1978 page_offset = offset & (PAGE_SIZE - 1);
1979 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
1980
1981 memcpy(pages[nr] + page_offset, buf, size);
1982
1983 len -= size;
1984 buf += size;
1985 offset += size;
1986 } while (len);
1987
1988 handle->offset = offset;
1989
1990 /*
1991 * Check we didn't copy past our reservation window, taking the
1992 * possible unsigned int wrap into account.
1993 */
1994 WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);
1995 }
1996
1997 #define perf_output_put(handle, x) \
1998 perf_output_copy((handle), &(x), sizeof(x))
1999
2000 static void perf_output_end(struct perf_output_handle *handle)
2001 {
2002 struct perf_counter *counter = handle->counter;
2003 struct perf_mmap_data *data = handle->data;
2004
2005 int wakeup_events = counter->hw_event.wakeup_events;
2006
2007 if (handle->overflow && wakeup_events) {
2008 int events = atomic_inc_return(&data->events);
2009 if (events >= wakeup_events) {
2010 atomic_sub(wakeup_events, &data->events);
2011 atomic_set(&data->wakeup, 1);
2012 }
2013 }
2014
2015 perf_output_unlock(handle);
2016 rcu_read_unlock();
2017 }
2018
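/*
 * Write an overflow record for the counter: size a perf_event_header
 * according to the record_type bits (IP, TID, TIME, ADDR, CONFIG, CPU,
 * GROUP, CALLCHAIN), reserve that much buffer space, then emit the
 * selected fields in the same order.
 */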
2019 static void perf_counter_output(struct perf_counter *counter,
2020 int nmi, struct pt_regs *regs, u64 addr)
2021 {
2022 int ret;
2023 u64 record_type = counter->hw_event.record_type;
2024 struct perf_output_handle handle;
2025 struct perf_event_header header;
2026 u64 ip;
2027 struct {
2028 u32 pid, tid;
2029 } tid_entry;
2030 struct {
2031 u64 event;
2032 u64 counter;
2033 } group_entry;
2034 struct perf_callchain_entry *callchain = NULL;
2035 int callchain_size = 0;
2036 u64 time;
2037 struct {
2038 u32 cpu, reserved;
2039 } cpu_entry;
2040
2041 header.type = 0;
2042 header.size = sizeof(header);
2043
2044 header.misc = PERF_EVENT_MISC_OVERFLOW;
2045 header.misc |= user_mode(regs) ?
2046 PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
2047
2048 if (record_type & PERF_RECORD_IP) {
2049 ip = instruction_pointer(regs);
2050 header.type |= PERF_RECORD_IP;
2051 header.size += sizeof(ip);
2052 }
2053
2054 if (record_type & PERF_RECORD_TID) {
2055 /* namespace issues */
2056 tid_entry.pid = current->group_leader->pid;
2057 tid_entry.tid = current->pid;
2058
2059 header.type |= PERF_RECORD_TID;
2060 header.size += sizeof(tid_entry);
2061 }
2062
2063 if (record_type & PERF_RECORD_TIME) {
2064 /*
2065 * Maybe do better on x86 and provide cpu_clock_nmi()
2066 */
2067 time = sched_clock();
2068
2069 header.type |= PERF_RECORD_TIME;
2070 header.size += sizeof(u64);
2071 }
2072
2073 if (record_type & PERF_RECORD_ADDR) {
2074 header.type |= PERF_RECORD_ADDR;
2075 header.size += sizeof(u64);
2076 }
2077
2078 if (record_type & PERF_RECORD_CONFIG) {
2079 header.type |= PERF_RECORD_CONFIG;
2080 header.size += sizeof(u64);
2081 }
2082
2083 if (record_type & PERF_RECORD_CPU) {
2084 header.type |= PERF_RECORD_CPU;
2085 header.size += sizeof(cpu_entry);
2086
2087 cpu_entry.cpu = raw_smp_processor_id();
2088 }
2089
2090 if (record_type & PERF_RECORD_GROUP) {
2091 header.type |= PERF_RECORD_GROUP;
2092 header.size += sizeof(u64) +
2093 counter->nr_siblings * sizeof(group_entry);
2094 }
2095
2096 if (record_type & PERF_RECORD_CALLCHAIN) {
2097 callchain = perf_callchain(regs);
2098
2099 if (callchain) {
2100 callchain_size = (1 + callchain->nr) * sizeof(u64);
2101
2102 header.type |= PERF_RECORD_CALLCHAIN;
2103 header.size += callchain_size;
2104 }
2105 }
2106
2107 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2108 if (ret)
2109 return;
2110
2111 perf_output_put(&handle, header);
2112
2113 if (record_type & PERF_RECORD_IP)
2114 perf_output_put(&handle, ip);
2115
2116 if (record_type & PERF_RECORD_TID)
2117 perf_output_put(&handle, tid_entry);
2118
2119 if (record_type & PERF_RECORD_TIME)
2120 perf_output_put(&handle, time);
2121
2122 if (record_type & PERF_RECORD_ADDR)
2123 perf_output_put(&handle, addr);
2124
2125 if (record_type & PERF_RECORD_CONFIG)
2126 perf_output_put(&handle, counter->hw_event.config);
2127
2128 if (record_type & PERF_RECORD_CPU)
2129 perf_output_put(&handle, cpu_entry);
2130
2131 /*
2132 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
2133 */
2134 if (record_type & PERF_RECORD_GROUP) {
2135 struct perf_counter *leader, *sub;
2136 u64 nr = counter->nr_siblings;
2137
2138 perf_output_put(&handle, nr);
2139
2140 leader = counter->group_leader;
2141 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2142 if (sub != counter)
2143 sub->pmu->read(sub);
2144
2145 group_entry.event = sub->hw_event.config;
2146 group_entry.counter = atomic64_read(&sub->count);
2147
2148 perf_output_put(&handle, group_entry);
2149 }
2150 }
2151
2152 if (callchain)
2153 perf_output_copy(&handle, callchain, callchain_size);
2154
2155 perf_output_end(&handle);
2156 }
2157
2158 /*
2159 * comm tracking
2160 */
2161
2162 struct perf_comm_event {
2163 struct task_struct *task;
2164 char *comm;
2165 int comm_size;
2166
2167 struct {
2168 struct perf_event_header header;
2169
2170 u32 pid;
2171 u32 tid;
2172 } event;
2173 };
2174
2175 static void perf_counter_comm_output(struct perf_counter *counter,
2176 struct perf_comm_event *comm_event)
2177 {
2178 struct perf_output_handle handle;
2179 int size = comm_event->event.header.size;
2180 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2181
2182 if (ret)
2183 return;
2184
2185 perf_output_put(&handle, comm_event->event);
2186 perf_output_copy(&handle, comm_event->comm,
2187 comm_event->comm_size);
2188 perf_output_end(&handle);
2189 }
2190
2191 static int perf_counter_comm_match(struct perf_counter *counter,
2192 struct perf_comm_event *comm_event)
2193 {
2194 if (counter->hw_event.comm &&
2195 comm_event->event.header.type == PERF_EVENT_COMM)
2196 return 1;
2197
2198 return 0;
2199 }
2200
2201 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2202 struct perf_comm_event *comm_event)
2203 {
2204 struct perf_counter *counter;
2205
2206 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2207 return;
2208
2209 rcu_read_lock();
2210 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2211 if (perf_counter_comm_match(counter, comm_event))
2212 perf_counter_comm_output(counter, comm_event);
2213 }
2214 rcu_read_unlock();
2215 }
2216
2217 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2218 {
2219 struct perf_cpu_context *cpuctx;
2220 unsigned int size;
2221 char *comm = comm_event->task->comm;
2222
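/* pad the comm string (including its NUL) to a u64 boundary so records stay 8-byte aligned */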
2223 size = ALIGN(strlen(comm)+1, sizeof(u64));
2224
2225 comm_event->comm = comm;
2226 comm_event->comm_size = size;
2227
2228 comm_event->event.header.size = sizeof(comm_event->event) + size;
2229
2230 cpuctx = &get_cpu_var(perf_cpu_context);
2231 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2232 put_cpu_var(perf_cpu_context);
2233
2234 perf_counter_comm_ctx(&current->perf_counter_ctx, comm_event);
2235 }
2236
2237 void perf_counter_comm(struct task_struct *task)
2238 {
2239 struct perf_comm_event comm_event;
2240
2241 if (!atomic_read(&nr_comm_tracking))
2242 return;
2243
2244 comm_event = (struct perf_comm_event){
2245 .task = task,
2246 .event = {
2247 .header = { .type = PERF_EVENT_COMM, },
2248 .pid = task->group_leader->pid,
2249 .tid = task->pid,
2250 },
2251 };
2252
2253 perf_counter_comm_event(&comm_event);
2254 }
2255
2256 /*
2257 * mmap tracking
2258 */
2259
2260 struct perf_mmap_event {
2261 struct file *file;
2262 char *file_name;
2263 int file_size;
2264
2265 struct {
2266 struct perf_event_header header;
2267
2268 u32 pid;
2269 u32 tid;
2270 u64 start;
2271 u64 len;
2272 u64 pgoff;
2273 } event;
2274 };
2275
2276 static void perf_counter_mmap_output(struct perf_counter *counter,
2277 struct perf_mmap_event *mmap_event)
2278 {
2279 struct perf_output_handle handle;
2280 int size = mmap_event->event.header.size;
2281 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2282
2283 if (ret)
2284 return;
2285
2286 perf_output_put(&handle, mmap_event->event);
2287 perf_output_copy(&handle, mmap_event->file_name,
2288 mmap_event->file_size);
2289 perf_output_end(&handle);
2290 }
2291
2292 static int perf_counter_mmap_match(struct perf_counter *counter,
2293 struct perf_mmap_event *mmap_event)
2294 {
2295 if (counter->hw_event.mmap &&
2296 mmap_event->event.header.type == PERF_EVENT_MMAP)
2297 return 1;
2298
2299 if (counter->hw_event.munmap &&
2300 mmap_event->event.header.type == PERF_EVENT_MUNMAP)
2301 return 1;
2302
2303 return 0;
2304 }
2305
2306 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2307 struct perf_mmap_event *mmap_event)
2308 {
2309 struct perf_counter *counter;
2310
2311 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2312 return;
2313
2314 rcu_read_lock();
2315 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2316 if (perf_counter_mmap_match(counter, mmap_event))
2317 perf_counter_mmap_output(counter, mmap_event);
2318 }
2319 rcu_read_unlock();
2320 }
2321
2322 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2323 {
2324 struct perf_cpu_context *cpuctx;
2325 struct file *file = mmap_event->file;
2326 unsigned int size;
2327 char tmp[16];
2328 char *buf = NULL;
2329 char *name;
2330
2331 if (file) {
2332 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2333 if (!buf) {
2334 name = strncpy(tmp, "//enomem", sizeof(tmp));
2335 goto got_name;
2336 }
2337 name = d_path(&file->f_path, buf, PATH_MAX);
2338 if (IS_ERR(name)) {
2339 name = strncpy(tmp, "//toolong", sizeof(tmp));
2340 goto got_name;
2341 }
2342 } else {
2343 name = strncpy(tmp, "//anon", sizeof(tmp));
2344 goto got_name;
2345 }
2346
2347 got_name:
2348 size = ALIGN(strlen(name)+1, sizeof(u64));
2349
2350 mmap_event->file_name = name;
2351 mmap_event->file_size = size;
2352
2353 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2354
2355 cpuctx = &get_cpu_var(perf_cpu_context);
2356 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2357 put_cpu_var(perf_cpu_context);
2358
2359 perf_counter_mmap_ctx(&current->perf_counter_ctx, mmap_event);
2360
2361 kfree(buf);
2362 }
2363
2364 void perf_counter_mmap(unsigned long addr, unsigned long len,
2365 unsigned long pgoff, struct file *file)
2366 {
2367 struct perf_mmap_event mmap_event;
2368
2369 if (!atomic_read(&nr_mmap_tracking))
2370 return;
2371
2372 mmap_event = (struct perf_mmap_event){
2373 .file = file,
2374 .event = {
2375 .header = { .type = PERF_EVENT_MMAP, },
2376 .pid = current->group_leader->pid,
2377 .tid = current->pid,
2378 .start = addr,
2379 .len = len,
2380 .pgoff = pgoff,
2381 },
2382 };
2383
2384 perf_counter_mmap_event(&mmap_event);
2385 }
2386
2387 void perf_counter_munmap(unsigned long addr, unsigned long len,
2388 unsigned long pgoff, struct file *file)
2389 {
2390 struct perf_mmap_event mmap_event;
2391
2392 if (!atomic_read(&nr_munmap_tracking))
2393 return;
2394
2395 mmap_event = (struct perf_mmap_event){
2396 .file = file,
2397 .event = {
2398 .header = { .type = PERF_EVENT_MUNMAP, },
2399 .pid = current->group_leader->pid,
2400 .tid = current->pid,
2401 .start = addr,
2402 .len = len,
2403 .pgoff = pgoff,
2404 },
2405 };
2406
2407 perf_counter_mmap_event(&mmap_event);
2408 }
2409
2410 /*
2411 * Generic counter overflow handling.
2412 */
2413
2414 int perf_counter_overflow(struct perf_counter *counter,
2415 int nmi, struct pt_regs *regs, u64 addr)
2416 {
2417 int events = atomic_read(&counter->event_limit);
2418 int ret = 0;
2419
2420 counter->hw.interrupts++;
2421
2422 /*
2423 * XXX event_limit might not quite work as expected on inherited
2424 * counters
2425 */
2426
2427 counter->pending_kill = POLL_IN;
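/*
* If an event_limit was requested, count it down; hitting zero
* soft-disables the counter (deferred through the pending queue when
* we are in NMI context) and upgrades the pending poll/SIGIO
* notification from POLL_IN to POLL_HUP.
*/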
2428 if (events && atomic_dec_and_test(&counter->event_limit)) {
2429 ret = 1;
2430 counter->pending_kill = POLL_HUP;
2431 if (nmi) {
2432 counter->pending_disable = 1;
2433 perf_pending_queue(&counter->pending,
2434 perf_pending_counter);
2435 } else
2436 perf_counter_disable(counter);
2437 }
2438
2439 perf_counter_output(counter, nmi, regs, addr);
2440 return ret;
2441 }
2442
2443 /*
2444 * Generic software counter infrastructure
2445 */
2446
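/*
* Fold the (prev_count, count) delta into counter->count and charge it
* against the current period.  The cmpxchg loop claims the delta
* atomically, so concurrent updates never account the same events twice.
*/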
2447 static void perf_swcounter_update(struct perf_counter *counter)
2448 {
2449 struct hw_perf_counter *hwc = &counter->hw;
2450 u64 prev, now;
2451 s64 delta;
2452
2453 again:
2454 prev = atomic64_read(&hwc->prev_count);
2455 now = atomic64_read(&hwc->count);
2456 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2457 goto again;
2458
2459 delta = now - prev;
2460
2461 atomic64_add(delta, &counter->count);
2462 atomic64_sub(delta, &hwc->period_left);
2463 }
2464
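/*
* (Re)arm the counter for the next period: hw.count is programmed to
* -left so that after 'left' more events it turns non-negative, which
* perf_swcounter_add() detects via atomic64_add_negative() and treats
* as an overflow.
*/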
2465 static void perf_swcounter_set_period(struct perf_counter *counter)
2466 {
2467 struct hw_perf_counter *hwc = &counter->hw;
2468 s64 left = atomic64_read(&hwc->period_left);
2469 s64 period = hwc->irq_period;
2470
2471 if (unlikely(left <= -period)) {
2472 left = period;
2473 atomic64_set(&hwc->period_left, left);
2474 }
2475
2476 if (unlikely(left <= 0)) {
2477 left += period;
2478 atomic64_add(period, &hwc->period_left);
2479 }
2480
2481 atomic64_set(&hwc->prev_count, -left);
2482 atomic64_set(&hwc->count, -left);
2483 }
2484
2485 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
2486 {
2487 enum hrtimer_restart ret = HRTIMER_RESTART;
2488 struct perf_counter *counter;
2489 struct pt_regs *regs;
2490 u64 period;
2491
2492 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
2493 counter->pmu->read(counter);
2494
2495 regs = get_irq_regs();
2496 /*
2497 * In case we exclude kernel IPs or are somehow not in interrupt
2498 * context, provide the next best thing, the user IP.
2499 */
2500 if ((counter->hw_event.exclude_kernel || !regs) &&
2501 !counter->hw_event.exclude_user)
2502 regs = task_pt_regs(current);
2503
2504 if (regs) {
2505 if (perf_counter_overflow(counter, 0, regs, 0))
2506 ret = HRTIMER_NORESTART;
2507 }
2508
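/* keep a 10 us floor on the period so a tiny irq_period cannot rearm the hrtimer at an excessive rate */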
2509 period = max_t(u64, 10000, counter->hw.irq_period);
2510 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
2511
2512 return ret;
2513 }
2514
2515 static void perf_swcounter_overflow(struct perf_counter *counter,
2516 int nmi, struct pt_regs *regs, u64 addr)
2517 {
2518 perf_swcounter_update(counter);
2519 perf_swcounter_set_period(counter);
2520 if (perf_counter_overflow(counter, nmi, regs, addr))
2521 /* soft-disable the counter */
2522 ;
2523
2524 }
2525
2526 static int perf_swcounter_match(struct perf_counter *counter,
2527 enum perf_event_types type,
2528 u32 event, struct pt_regs *regs)
2529 {
2530 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2531 return 0;
2532
2533 if (perf_event_raw(&counter->hw_event))
2534 return 0;
2535
2536 if (perf_event_type(&counter->hw_event) != type)
2537 return 0;
2538
2539 if (perf_event_id(&counter->hw_event) != event)
2540 return 0;
2541
2542 if (counter->hw_event.exclude_user && user_mode(regs))
2543 return 0;
2544
2545 if (counter->hw_event.exclude_kernel && !user_mode(regs))
2546 return 0;
2547
2548 return 1;
2549 }
2550
2551 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
2552 int nmi, struct pt_regs *regs, u64 addr)
2553 {
2554 int neg = atomic64_add_negative(nr, &counter->hw.count);
2555 if (counter->hw.irq_period && !neg)
2556 perf_swcounter_overflow(counter, nmi, regs, addr);
2557 }
2558
2559 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
2560 enum perf_event_types type, u32 event,
2561 u64 nr, int nmi, struct pt_regs *regs,
2562 u64 addr)
2563 {
2564 struct perf_counter *counter;
2565
2566 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2567 return;
2568
2569 rcu_read_lock();
2570 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2571 if (perf_swcounter_match(counter, type, event, regs))
2572 perf_swcounter_add(counter, nr, nmi, regs, addr);
2573 }
2574 rcu_read_unlock();
2575 }
2576
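/*
* Pick the recursion flag for the current execution context (task,
* softirq, hardirq or NMI).  Each level has its own flag, so a software
* event raised while we are already processing one at the same level is
* simply dropped instead of recursing.
*/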
2577 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
2578 {
2579 if (in_nmi())
2580 return &cpuctx->recursion[3];
2581
2582 if (in_irq())
2583 return &cpuctx->recursion[2];
2584
2585 if (in_softirq())
2586 return &cpuctx->recursion[1];
2587
2588 return &cpuctx->recursion[0];
2589 }
2590
2591 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
2592 u64 nr, int nmi, struct pt_regs *regs,
2593 u64 addr)
2594 {
2595 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
2596 int *recursion = perf_swcounter_recursion_context(cpuctx);
2597
2598 if (*recursion)
2599 goto out;
2600
2601 (*recursion)++;
2602 barrier();
2603
2604 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
2605 nr, nmi, regs, addr);
2606 if (cpuctx->task_ctx) {
2607 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
2608 nr, nmi, regs, addr);
2609 }
2610
2611 barrier();
2612 (*recursion)--;
2613
2614 out:
2615 put_cpu_var(perf_cpu_context);
2616 }
2617
2618 void
2619 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
2620 {
2621 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
2622 }
2623
2624 static void perf_swcounter_read(struct perf_counter *counter)
2625 {
2626 perf_swcounter_update(counter);
2627 }
2628
2629 static int perf_swcounter_enable(struct perf_counter *counter)
2630 {
2631 perf_swcounter_set_period(counter);
2632 return 0;
2633 }
2634
2635 static void perf_swcounter_disable(struct perf_counter *counter)
2636 {
2637 perf_swcounter_update(counter);
2638 }
2639
2640 static const struct pmu perf_ops_generic = {
2641 .enable = perf_swcounter_enable,
2642 .disable = perf_swcounter_disable,
2643 .read = perf_swcounter_read,
2644 };
2645
2646 /*
2647 * Software counter: cpu wall time clock
2648 */
2649
2650 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
2651 {
2652 int cpu = raw_smp_processor_id();
2653 s64 prev;
2654 u64 now;
2655
2656 now = cpu_clock(cpu);
2657 prev = atomic64_read(&counter->hw.prev_count);
2658 atomic64_set(&counter->hw.prev_count, now);
2659 atomic64_add(now - prev, &counter->count);
2660 }
2661
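/*
* Sampling for the clock counters is emulated with an hrtimer: the
* counter value is the clock itself, and the timer fires roughly every
* irq_period nanoseconds (with the same 10 us floor as in
* perf_swcounter_hrtimer()) to generate overflow events.
*/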
2662 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
2663 {
2664 struct hw_perf_counter *hwc = &counter->hw;
2665 int cpu = raw_smp_processor_id();
2666
2667 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
2668 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2669 hwc->hrtimer.function = perf_swcounter_hrtimer;
2670 if (hwc->irq_period) {
2671 u64 period = max_t(u64, 10000, hwc->irq_period);
2672 __hrtimer_start_range_ns(&hwc->hrtimer,
2673 ns_to_ktime(period), 0,
2674 HRTIMER_MODE_REL, 0);
2675 }
2676
2677 return 0;
2678 }
2679
2680 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
2681 {
2682 hrtimer_cancel(&counter->hw.hrtimer);
2683 cpu_clock_perf_counter_update(counter);
2684 }
2685
2686 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
2687 {
2688 cpu_clock_perf_counter_update(counter);
2689 }
2690
2691 static const struct pmu perf_ops_cpu_clock = {
2692 .enable = cpu_clock_perf_counter_enable,
2693 .disable = cpu_clock_perf_counter_disable,
2694 .read = cpu_clock_perf_counter_read,
2695 };
2696
2697 /*
2698 * Software counter: task time clock
2699 */
2700
2701 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
2702 {
2703 u64 prev;
2704 s64 delta;
2705
2706 prev = atomic64_xchg(&counter->hw.prev_count, now);
2707 delta = now - prev;
2708 atomic64_add(delta, &counter->count);
2709 }
2710
2711 static int task_clock_perf_counter_enable(struct perf_counter *counter)
2712 {
2713 struct hw_perf_counter *hwc = &counter->hw;
2714 u64 now;
2715
2716 now = counter->ctx->time;
2717
2718 atomic64_set(&hwc->prev_count, now);
2719 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2720 hwc->hrtimer.function = perf_swcounter_hrtimer;
2721 if (hwc->irq_period) {
2722 u64 period = max_t(u64, 10000, hwc->irq_period);
2723 __hrtimer_start_range_ns(&hwc->hrtimer,
2724 ns_to_ktime(period), 0,
2725 HRTIMER_MODE_REL, 0);
2726 }
2727
2728 return 0;
2729 }
2730
2731 static void task_clock_perf_counter_disable(struct perf_counter *counter)
2732 {
2733 hrtimer_cancel(&counter->hw.hrtimer);
2734 task_clock_perf_counter_update(counter, counter->ctx->time);
2735
2736 }
2737
2738 static void task_clock_perf_counter_read(struct perf_counter *counter)
2739 {
2740 u64 time;
2741
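/*
* In NMI context, avoid update_context_time() and extrapolate the
* context time from its last recorded timestamp instead.
*/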
2742 if (!in_nmi()) {
2743 update_context_time(counter->ctx);
2744 time = counter->ctx->time;
2745 } else {
2746 u64 now = perf_clock();
2747 u64 delta = now - counter->ctx->timestamp;
2748 time = counter->ctx->time + delta;
2749 }
2750
2751 task_clock_perf_counter_update(counter, time);
2752 }
2753
2754 static const struct pmu perf_ops_task_clock = {
2755 .enable = task_clock_perf_counter_enable,
2756 .disable = task_clock_perf_counter_disable,
2757 .read = task_clock_perf_counter_read,
2758 };
2759
2760 /*
2761 * Software counter: cpu migrations
2762 */
2763
2764 static inline u64 get_cpu_migrations(struct perf_counter *counter)
2765 {
2766 struct task_struct *curr = counter->ctx->task;
2767
2768 if (curr)
2769 return curr->se.nr_migrations;
2770 return cpu_nr_migrations(smp_processor_id());
2771 }
2772
2773 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
2774 {
2775 u64 prev, now;
2776 s64 delta;
2777
2778 prev = atomic64_read(&counter->hw.prev_count);
2779 now = get_cpu_migrations(counter);
2780
2781 atomic64_set(&counter->hw.prev_count, now);
2782
2783 delta = now - prev;
2784
2785 atomic64_add(delta, &counter->count);
2786 }
2787
2788 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
2789 {
2790 cpu_migrations_perf_counter_update(counter);
2791 }
2792
2793 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
2794 {
2795 if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
2796 atomic64_set(&counter->hw.prev_count,
2797 get_cpu_migrations(counter));
2798 return 0;
2799 }
2800
2801 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
2802 {
2803 cpu_migrations_perf_counter_update(counter);
2804 }
2805
2806 static const struct pmu perf_ops_cpu_migrations = {
2807 .enable = cpu_migrations_perf_counter_enable,
2808 .disable = cpu_migrations_perf_counter_disable,
2809 .read = cpu_migrations_perf_counter_read,
2810 };
2811
2812 #ifdef CONFIG_EVENT_PROFILE
2813 void perf_tpcounter_event(int event_id)
2814 {
2815 struct pt_regs *regs = get_irq_regs();
2816
2817 if (!regs)
2818 regs = task_pt_regs(current);
2819
2820 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
2821 }
2822 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
2823
2824 extern int ftrace_profile_enable(int);
2825 extern void ftrace_profile_disable(int);
2826
2827 static void tp_perf_counter_destroy(struct perf_counter *counter)
2828 {
2829 ftrace_profile_disable(perf_event_id(&counter->hw_event));
2830 }
2831
2832 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
2833 {
2834 int event_id = perf_event_id(&counter->hw_event);
2835 int ret;
2836
2837 ret = ftrace_profile_enable(event_id);
2838 if (ret)
2839 return NULL;
2840
2841 counter->destroy = tp_perf_counter_destroy;
2842 counter->hw.irq_period = counter->hw_event.irq_period;
2843
2844 return &perf_ops_generic;
2845 }
2846 #else
2847 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
2848 {
2849 return NULL;
2850 }
2851 #endif
2852
2853 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
2854 {
2855 const struct pmu *pmu = NULL;
2856
2857 /*
2858 * Software counters (currently) can't in general distinguish
2859 * between user, kernel and hypervisor events.
2860 * However, context switches and cpu migrations are considered
2861 * to be kernel events, and page faults are never hypervisor
2862 * events.
2863 */
2864 switch (perf_event_id(&counter->hw_event)) {
2865 case PERF_COUNT_CPU_CLOCK:
2866 pmu = &perf_ops_cpu_clock;
2867
2868 break;
2869 case PERF_COUNT_TASK_CLOCK:
2870 /*
2871 * If the user instantiates this as a per-cpu counter,
2872 * use the cpu_clock counter instead.
2873 */
2874 if (counter->ctx->task)
2875 pmu = &perf_ops_task_clock;
2876 else
2877 pmu = &perf_ops_cpu_clock;
2878
2879 break;
2880 case PERF_COUNT_PAGE_FAULTS:
2881 case PERF_COUNT_PAGE_FAULTS_MIN:
2882 case PERF_COUNT_PAGE_FAULTS_MAJ:
2883 case PERF_COUNT_CONTEXT_SWITCHES:
2884 pmu = &perf_ops_generic;
2885 break;
2886 case PERF_COUNT_CPU_MIGRATIONS:
2887 if (!counter->hw_event.exclude_kernel)
2888 pmu = &perf_ops_cpu_migrations;
2889 break;
2890 }
2891
2892 return pmu;
2893 }
2894
2895 /*
2896 * Allocate and initialize a counter structure
2897 */
2898 static struct perf_counter *
2899 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
2900 int cpu,
2901 struct perf_counter_context *ctx,
2902 struct perf_counter *group_leader,
2903 gfp_t gfpflags)
2904 {
2905 const struct pmu *pmu;
2906 struct perf_counter *counter;
2907 struct hw_perf_counter *hwc;
2908 long err;
2909
2910 counter = kzalloc(sizeof(*counter), gfpflags);
2911 if (!counter)
2912 return ERR_PTR(-ENOMEM);
2913
2914 /*
2915 * Single counters are their own group leaders, with an
2916 * empty sibling list:
2917 */
2918 if (!group_leader)
2919 group_leader = counter;
2920
2921 mutex_init(&counter->mutex);
2922 INIT_LIST_HEAD(&counter->list_entry);
2923 INIT_LIST_HEAD(&counter->event_entry);
2924 INIT_LIST_HEAD(&counter->sibling_list);
2925 init_waitqueue_head(&counter->waitq);
2926
2927 mutex_init(&counter->mmap_mutex);
2928
2929 INIT_LIST_HEAD(&counter->child_list);
2930
2931 counter->cpu = cpu;
2932 counter->hw_event = *hw_event;
2933 counter->group_leader = group_leader;
2934 counter->pmu = NULL;
2935 counter->ctx = ctx;
2936
2937 counter->state = PERF_COUNTER_STATE_INACTIVE;
2938 if (hw_event->disabled)
2939 counter->state = PERF_COUNTER_STATE_OFF;
2940
2941 pmu = NULL;
2942
2943 hwc = &counter->hw;
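/*
* If the caller asked for a sampling frequency rather than a fixed
* period, derive an initial period estimate from it; otherwise use the
* period exactly as supplied.
*/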
2944 if (hw_event->freq && hw_event->irq_freq)
2945 hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
2946 else
2947 hwc->irq_period = hw_event->irq_period;
2948
2949 /*
2950 * we currently do not support PERF_RECORD_GROUP on inherited counters
2951 */
2952 if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
2953 goto done;
2954
2955 if (perf_event_raw(hw_event)) {
2956 pmu = hw_perf_counter_init(counter);
2957 goto done;
2958 }
2959
2960 switch (perf_event_type(hw_event)) {
2961 case PERF_TYPE_HARDWARE:
2962 pmu = hw_perf_counter_init(counter);
2963 break;
2964
2965 case PERF_TYPE_SOFTWARE:
2966 pmu = sw_perf_counter_init(counter);
2967 break;
2968
2969 case PERF_TYPE_TRACEPOINT:
2970 pmu = tp_perf_counter_init(counter);
2971 break;
2972 }
2973 done:
2974 err = 0;
2975 if (!pmu)
2976 err = -EINVAL;
2977 else if (IS_ERR(pmu))
2978 err = PTR_ERR(pmu);
2979
2980 if (err) {
2981 kfree(counter);
2982 return ERR_PTR(err);
2983 }
2984
2985 counter->pmu = pmu;
2986
2987 atomic_inc(&nr_counters);
2988 if (counter->hw_event.mmap)
2989 atomic_inc(&nr_mmap_tracking);
2990 if (counter->hw_event.munmap)
2991 atomic_inc(&nr_munmap_tracking);
2992 if (counter->hw_event.comm)
2993 atomic_inc(&nr_comm_tracking);
2994
2995 return counter;
2996 }
2997
2998 /**
2999 * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
3000 *
3001 * @hw_event_uptr: event type attributes for monitoring/sampling
3002 * @pid: target pid
3003 * @cpu: target cpu
3004 * @group_fd: group leader counter fd
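* @flags: reserved for future use, must currently be zero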
3005 */
3006 SYSCALL_DEFINE5(perf_counter_open,
3007 const struct perf_counter_hw_event __user *, hw_event_uptr,
3008 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3009 {
3010 struct perf_counter *counter, *group_leader;
3011 struct perf_counter_hw_event hw_event;
3012 struct perf_counter_context *ctx;
3013 struct file *counter_file = NULL;
3014 struct file *group_file = NULL;
3015 int fput_needed = 0;
3016 int fput_needed2 = 0;
3017 int ret;
3018
3019 /* for future expandability... */
3020 if (flags)
3021 return -EINVAL;
3022
3023 if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
3024 return -EFAULT;
3025
3026 /*
3027 * Get the target context (task or percpu):
3028 */
3029 ctx = find_get_context(pid, cpu);
3030 if (IS_ERR(ctx))
3031 return PTR_ERR(ctx);
3032
3033 /*
3034 * Look up the group leader (we will attach this counter to it):
3035 */
3036 group_leader = NULL;
3037 if (group_fd != -1) {
3038 ret = -EINVAL;
3039 group_file = fget_light(group_fd, &fput_needed);
3040 if (!group_file)
3041 goto err_put_context;
3042 if (group_file->f_op != &perf_fops)
3043 goto err_put_context;
3044
3045 group_leader = group_file->private_data;
3046 /*
3047 * Do not allow a recursive hierarchy (the chosen group leader must
3048 * itself be a leader, not somebody else's sibling):
3049 */
3050 if (group_leader->group_leader != group_leader)
3051 goto err_put_context;
3052 /*
3053 * Do not allow attaching to a group in a different
3054 * task or CPU context:
3055 */
3056 if (group_leader->ctx != ctx)
3057 goto err_put_context;
3058 /*
3059 * Only a group leader can be exclusive or pinned
3060 */
3061 if (hw_event.exclusive || hw_event.pinned)
3062 goto err_put_context;
3063 }
3064
3065 counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
3066 GFP_KERNEL);
3067 ret = PTR_ERR(counter);
3068 if (IS_ERR(counter))
3069 goto err_put_context;
3070
3071 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3072 if (ret < 0)
3073 goto err_free_put_context;
3074
3075 counter_file = fget_light(ret, &fput_needed2);
3076 if (!counter_file)
3077 goto err_free_put_context;
3078
3079 counter->filp = counter_file;
3080 mutex_lock(&ctx->mutex);
3081 perf_install_in_context(ctx, counter, cpu);
3082 mutex_unlock(&ctx->mutex);
3083
3084 fput_light(counter_file, fput_needed2);
3085
3086 out_fput:
3087 fput_light(group_file, fput_needed);
3088
3089 return ret;
3090
3091 err_free_put_context:
3092 kfree(counter);
3093
3094 err_put_context:
3095 put_context(ctx);
3096
3097 goto out_fput;
3098 }
3099
3100 /*
3101 * Initialize the perf_counter context in a task_struct:
3102 */
3103 static void
3104 __perf_counter_init_context(struct perf_counter_context *ctx,
3105 struct task_struct *task)
3106 {
3107 memset(ctx, 0, sizeof(*ctx));
3108 spin_lock_init(&ctx->lock);
3109 mutex_init(&ctx->mutex);
3110 INIT_LIST_HEAD(&ctx->counter_list);
3111 INIT_LIST_HEAD(&ctx->event_list);
3112 ctx->task = task;
3113 }
3114
3115 /*
3116 * inherit a counter from parent task to child task:
3117 */
3118 static struct perf_counter *
3119 inherit_counter(struct perf_counter *parent_counter,
3120 struct task_struct *parent,
3121 struct perf_counter_context *parent_ctx,
3122 struct task_struct *child,
3123 struct perf_counter *group_leader,
3124 struct perf_counter_context *child_ctx)
3125 {
3126 struct perf_counter *child_counter;
3127
3128 /*
3129 * Instead of creating recursive hierarchies of counters,
3130 * we link inherited counters back to the original parent,
3131 * which is guaranteed to have a filp, and use that filp as the
3132 * reference count:
3133 */
3134 if (parent_counter->parent)
3135 parent_counter = parent_counter->parent;
3136
3137 child_counter = perf_counter_alloc(&parent_counter->hw_event,
3138 parent_counter->cpu, child_ctx,
3139 group_leader, GFP_KERNEL);
3140 if (IS_ERR(child_counter))
3141 return child_counter;
3142
3143 /*
3144 * Link it up in the child's context:
3145 */
3146 child_counter->task = child;
3147 add_counter_to_ctx(child_counter, child_ctx);
3148
3149 child_counter->parent = parent_counter;
3150 /*
3151 * inherit into child's child as well:
3152 */
3153 child_counter->hw_event.inherit = 1;
3154
3155 /*
3156 * Get a reference to the parent filp - we will fput it
3157 * when the child counter exits. This is safe to do because
3158 * we are in the parent and we know that the filp still
3159 * exists and has a nonzero count:
3160 */
3161 atomic_long_inc(&parent_counter->filp->f_count);
3162
3163 /*
3164 * Link this into the parent counter's child list
3165 */
3166 mutex_lock(&parent_counter->mutex);
3167 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3168
3169 /*
3170 * Make the child state follow the state of the parent counter,
3171 * not its hw_event.disabled bit. We hold the parent's mutex,
3172 * so we won't race with perf_counter_{en,dis}able_family.
3173 */
3174 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3175 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3176 else
3177 child_counter->state = PERF_COUNTER_STATE_OFF;
3178
3179 mutex_unlock(&parent_counter->mutex);
3180
3181 return child_counter;
3182 }
3183
3184 static int inherit_group(struct perf_counter *parent_counter,
3185 struct task_struct *parent,
3186 struct perf_counter_context *parent_ctx,
3187 struct task_struct *child,
3188 struct perf_counter_context *child_ctx)
3189 {
3190 struct perf_counter *leader;
3191 struct perf_counter *sub;
3192 struct perf_counter *child_ctr;
3193
3194 leader = inherit_counter(parent_counter, parent, parent_ctx,
3195 child, NULL, child_ctx);
3196 if (IS_ERR(leader))
3197 return PTR_ERR(leader);
3198 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3199 child_ctr = inherit_counter(sub, parent, parent_ctx,
3200 child, leader, child_ctx);
3201 if (IS_ERR(child_ctr))
3202 return PTR_ERR(child_ctr);
3203 }
3204 return 0;
3205 }
3206
3207 static void sync_child_counter(struct perf_counter *child_counter,
3208 struct perf_counter *parent_counter)
3209 {
3210 u64 parent_val, child_val;
3211
3212 parent_val = atomic64_read(&parent_counter->count);
3213 child_val = atomic64_read(&child_counter->count);
3214
3215 /*
3216 * Add back the child's count to the parent's count:
3217 */
3218 atomic64_add(child_val, &parent_counter->count);
3219 atomic64_add(child_counter->total_time_enabled,
3220 &parent_counter->child_total_time_enabled);
3221 atomic64_add(child_counter->total_time_running,
3222 &parent_counter->child_total_time_running);
3223
3224 /*
3225 * Remove this counter from the parent's list
3226 */
3227 mutex_lock(&parent_counter->mutex);
3228 list_del_init(&child_counter->child_list);
3229 mutex_unlock(&parent_counter->mutex);
3230
3231 /*
3232 * Release the parent counter, if this was the last
3233 * reference to it.
3234 */
3235 fput(parent_counter->filp);
3236 }
3237
3238 static void
3239 __perf_counter_exit_task(struct task_struct *child,
3240 struct perf_counter *child_counter,
3241 struct perf_counter_context *child_ctx)
3242 {
3243 struct perf_counter *parent_counter;
3244 struct perf_counter *sub, *tmp;
3245
3246 /*
3247 * If we do not self-reap then we have to wait for the
3248 * child task to unschedule (it will happen for sure),
3249 * so that its counter is at its final count. (This
3250 * condition triggers rarely - child tasks usually get
3251 * off their CPU before the parent has a chance to
3252 * get this far into the reaping action)
3253 */
3254 if (child != current) {
3255 wait_task_inactive(child, 0);
3256 list_del_init(&child_counter->list_entry);
3257 update_counter_times(child_counter);
3258 } else {
3259 struct perf_cpu_context *cpuctx;
3260 unsigned long flags;
3261
3262 /*
3263 * Disable and unlink this counter.
3264 *
3265 * Be careful about zapping the list - IRQ/NMI context
3266 * could still be processing it:
3267 */
3268 local_irq_save(flags);
3269 perf_disable();
3270
3271 cpuctx = &__get_cpu_var(perf_cpu_context);
3272
3273 group_sched_out(child_counter, cpuctx, child_ctx);
3274 update_counter_times(child_counter);
3275
3276 list_del_init(&child_counter->list_entry);
3277
3278 child_ctx->nr_counters--;
3279
3280 perf_enable();
3281 local_irq_restore(flags);
3282 }
3283
3284 parent_counter = child_counter->parent;
3285 /*
3286 * It can happen that parent exits first, and has counters
3287 * that are still around due to the child reference. These
3288 * counters need to be zapped - but otherwise linger.
3289 */
3290 if (parent_counter) {
3291 sync_child_counter(child_counter, parent_counter);
3292 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
3293 list_entry) {
3294 if (sub->parent) {
3295 sync_child_counter(sub, sub->parent);
3296 free_counter(sub);
3297 }
3298 }
3299 free_counter(child_counter);
3300 }
3301 }
3302
3303 /*
3304 * When a child task exits, feed back counter values to parent counters.
3305 *
3306 * Note: we may be running in child context, but the PID is not hashed
3307 * anymore so new counters will not be added.
3308 */
3309 void perf_counter_exit_task(struct task_struct *child)
3310 {
3311 struct perf_counter *child_counter, *tmp;
3312 struct perf_counter_context *child_ctx;
3313
3314 child_ctx = &child->perf_counter_ctx;
3315
3316 if (likely(!child_ctx->nr_counters))
3317 return;
3318
3319 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
3320 list_entry)
3321 __perf_counter_exit_task(child, child_counter, child_ctx);
3322 }
3323
3324 /*
3325 * Initialize the perf_counter context in task_struct
3326 */
3327 void perf_counter_init_task(struct task_struct *child)
3328 {
3329 struct perf_counter_context *child_ctx, *parent_ctx;
3330 struct perf_counter *counter;
3331 struct task_struct *parent = current;
3332
3333 child_ctx = &child->perf_counter_ctx;
3334 parent_ctx = &parent->perf_counter_ctx;
3335
3336 __perf_counter_init_context(child_ctx, child);
3337
3338 /*
3339 * This is executed from the parent task context, so inherit
3340 * counters that have been marked for cloning:
3341 */
3342
3343 if (likely(!parent_ctx->nr_counters))
3344 return;
3345
3346 /*
3347 * Lock the parent list. No need to lock the child - not PID
3348 * hashed yet and not running, so nobody can access it.
3349 */
3350 mutex_lock(&parent_ctx->mutex);
3351
3352 /*
3353 * We don't have to disable NMIs - we are only looking at
3354 * the list, not manipulating it:
3355 */
3356 list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
3357 if (!counter->hw_event.inherit)
3358 continue;
3359
3360 if (inherit_group(counter, parent,
3361 parent_ctx, child, child_ctx))
3362 break;
3363 }
3364
3365 mutex_unlock(&parent_ctx->mutex);
3366 }
3367
3368 static void __cpuinit perf_counter_init_cpu(int cpu)
3369 {
3370 struct perf_cpu_context *cpuctx;
3371
3372 cpuctx = &per_cpu(perf_cpu_context, cpu);
3373 __perf_counter_init_context(&cpuctx->ctx, NULL);
3374
3375 spin_lock(&perf_resource_lock);
3376 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
3377 spin_unlock(&perf_resource_lock);
3378
3379 hw_perf_counter_setup(cpu);
3380 }
3381
3382 #ifdef CONFIG_HOTPLUG_CPU
3383 static void __perf_counter_exit_cpu(void *info)
3384 {
3385 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3386 struct perf_counter_context *ctx = &cpuctx->ctx;
3387 struct perf_counter *counter, *tmp;
3388
3389 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
3390 __perf_counter_remove_from_context(counter);
3391 }
3392 static void perf_counter_exit_cpu(int cpu)
3393 {
3394 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3395 struct perf_counter_context *ctx = &cpuctx->ctx;
3396
3397 mutex_lock(&ctx->mutex);
3398 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
3399 mutex_unlock(&ctx->mutex);
3400 }
3401 #else
3402 static inline void perf_counter_exit_cpu(int cpu) { }
3403 #endif
3404
3405 static int __cpuinit
3406 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
3407 {
3408 unsigned int cpu = (long)hcpu;
3409
3410 switch (action) {
3411
3412 case CPU_UP_PREPARE:
3413 case CPU_UP_PREPARE_FROZEN:
3414 perf_counter_init_cpu(cpu);
3415 break;
3416
3417 case CPU_DOWN_PREPARE:
3418 case CPU_DOWN_PREPARE_FROZEN:
3419 perf_counter_exit_cpu(cpu);
3420 break;
3421
3422 default:
3423 break;
3424 }
3425
3426 return NOTIFY_OK;
3427 }
3428
3429 static struct notifier_block __cpuinitdata perf_cpu_nb = {
3430 .notifier_call = perf_cpu_notify,
3431 };
3432
3433 void __init perf_counter_init(void)
3434 {
3435 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
3436 (void *)(long)smp_processor_id());
3437 register_cpu_notifier(&perf_cpu_nb);
3438 }
3439
3440 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
3441 {
3442 return sprintf(buf, "%d\n", perf_reserved_percpu);
3443 }
3444
3445 static ssize_t
3446 perf_set_reserve_percpu(struct sysdev_class *class,
3447 const char *buf,
3448 size_t count)
3449 {
3450 struct perf_cpu_context *cpuctx;
3451 unsigned long val;
3452 int err, cpu, mpt;
3453
3454 err = strict_strtoul(buf, 10, &val);
3455 if (err)
3456 return err;
3457 if (val > perf_max_counters)
3458 return -EINVAL;
3459
3460 spin_lock(&perf_resource_lock);
3461 perf_reserved_percpu = val;
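/*
* Recompute each CPU's per-task headroom: the counters still free on
* that CPU, capped by how many the new reservation leaves available
* for per-task use.
*/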
3462 for_each_online_cpu(cpu) {
3463 cpuctx = &per_cpu(perf_cpu_context, cpu);
3464 spin_lock_irq(&cpuctx->ctx.lock);
3465 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
3466 perf_max_counters - perf_reserved_percpu);
3467 cpuctx->max_pertask = mpt;
3468 spin_unlock_irq(&cpuctx->ctx.lock);
3469 }
3470 spin_unlock(&perf_resource_lock);
3471
3472 return count;
3473 }
3474
3475 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
3476 {
3477 return sprintf(buf, "%d\n", perf_overcommit);
3478 }
3479
3480 static ssize_t
3481 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
3482 {
3483 unsigned long val;
3484 int err;
3485
3486 err = strict_strtoul(buf, 10, &val);
3487 if (err)
3488 return err;
3489 if (val > 1)
3490 return -EINVAL;
3491
3492 spin_lock(&perf_resource_lock);
3493 perf_overcommit = val;
3494 spin_unlock(&perf_resource_lock);
3495
3496 return count;
3497 }
3498
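/*
* These attributes should appear as reserve_percpu and overcommit under
* /sys/devices/system/cpu/perf_counters/ (path assuming the standard
* cpu sysdev class layout).
*/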
3499 static SYSDEV_CLASS_ATTR(
3500 reserve_percpu,
3501 0644,
3502 perf_show_reserve_percpu,
3503 perf_set_reserve_percpu
3504 );
3505
3506 static SYSDEV_CLASS_ATTR(
3507 overcommit,
3508 0644,
3509 perf_show_overcommit,
3510 perf_set_overcommit
3511 );
3512
3513 static struct attribute *perfclass_attrs[] = {
3514 &attr_reserve_percpu.attr,
3515 &attr_overcommit.attr,
3516 NULL
3517 };
3518
3519 static struct attribute_group perfclass_attr_group = {
3520 .attrs = perfclass_attrs,
3521 .name = "perf_counters",
3522 };
3523
3524 static int __init perf_counter_sysfs_init(void)
3525 {
3526 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
3527 &perfclass_attr_group);
3528 }
3529 device_initcall(perf_counter_sysfs_init);