/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>

#include <asm/irq_regs.h>
/*
 * Each CPU has a list of per CPU events:
 */
static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
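/*
 * A sketch of the admin-side usage (not part of this file): the paranoia
 * level can be changed at runtime via
 *
 *	# echo 2 > /proc/sys/kernel/perf_event_paranoid
 *
 * which writes sysctl_perf_event_paranoid above.
 */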
/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

/*
 * Lock for (sysadmin-configurable) event reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak perf_event_print_debug(void)	{ }

static DEFINE_PER_CPU(int, perf_disable_count);

void perf_disable(void)
{
	if (!__get_cpu_var(perf_disable_count)++)
		hw_perf_disable();
}

void perf_enable(void)
{
	if (!--__get_cpu_var(perf_disable_count))
		hw_perf_enable();
}
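/*
 * perf_disable()/perf_enable() nest via perf_disable_count; only the
 * outermost pair touches the hardware. E.g. (sketch):
 *
 *	perf_disable();		// count 0 -> 1, calls hw_perf_disable()
 *	perf_disable();		// count 1 -> 2, no-op
 *	perf_enable();		// count 2 -> 1, no-op
 *	perf_enable();		// count 1 -> 0, calls hw_perf_enable()
 */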
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}
/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}
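/*
 * Typical pairing (sketch): perf_pin_task_context() returns with a
 * reference and pin held (lock already dropped), perf_unpin_context()
 * undoes both:
 *
 *	ctx = perf_pin_task_context(task);
 *	if (ctx) {
 *		... ctx can neither get swapped nor freed here ...
 *		perf_unpin_context(ctx);
 *	}
 */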
static inline u64 perf_clock(void)
{
	return local_clock();
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}
/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = ctx->time;

	event->total_time_running = run_end - event->tstamp_running;
}
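/*
 * In short, the accounting above computes (sketch):
 *
 *	total_time_enabled = run_end - tstamp_enabled
 *	total_time_running = run_end - tstamp_running
 *
 * where run_end is ctx->time for a live event and freezes at
 * tstamp_stopped once the event is scheduled out.
 */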
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;

	return &ctx->flexible_groups;
}
/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}
static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader;

	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
	    !is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;
}
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}
static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		return;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}
}
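/*
 * E.g. (sketch): detaching the leader L of a group {L, s1, s2} leaves
 * s1 and s2 on L's context list as singleton events, each being its
 * own group_leader and inheriting L's group_flags.
 */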
static inline int
event_filter_match(struct perf_event *event)
{
	return event->cpu == -1 || event->cpu == smp_processor_id();
}
static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = ctx->time - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = ctx->time;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = ctx->time;
	event->pmu->disable(event);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}
static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}
/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * events on a global level.
	 */
	perf_disable();

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task events with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_events - ctx->nr_events,
			    perf_max_events - perf_reserved_percpu);
	}

	perf_enable();
	raw_spin_unlock(&ctx->lock);
}
/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can remove the event safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);
}
/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock_irq(&ctx->lock);
}
static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->enable(event)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	bool txn = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	/* Check if group transaction available */
	if (pmu->start_txn)
		txn = true;

	if (txn)
		pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		if (txn)
			pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!txn || !pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		event_sched_out(event, cpuctx, ctx);
	}
	event_sched_out(group_event, cpuctx, ctx);

	if (txn)
		pmu->cancel_txn(pmu);

	return -EAGAIN;
}
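/*
 * The transaction hooks used above let a PMU validate the whole group
 * at once instead of member by member (sketch):
 *
 *	pmu->start_txn(pmu);
 *	... event_sched_in() the leader and every sibling ...
 *	if (pmu->commit_txn(pmu))
 *		... schedule the members back out, pmu->cancel_txn(pmu) ...
 */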
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = ctx->time;
	event->tstamp_running = ctx->time;
	event->tstamp_stopped = ctx->time;
}
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no events.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * events on a global level. NOP for non NMI based events.
	 */
	perf_disable();

	add_event_to_ctx(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx);

	if (err) {
		/*
		 * This event couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	perf_enable();

	raw_spin_unlock(&ctx->lock);
}
/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can add the event safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&event->group_entry))
		add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
{
	struct perf_event *sub;

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = ctx->time - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled =
				ctx->time - sub->total_time_enabled;
		}
	}
}
/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int err;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;
	__perf_event_mark_enabled(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
		perf_enable();
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);
}
/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,
					 event, 1);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	raw_spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

out:
	raw_spin_unlock_irq(&ctx->lock);
}
static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (!ctx->nr_active)
		goto out_enable;

	if (event_type & EVENT_PINNED) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if (event_type & EVENT_FLEXIBLE) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

out_enable:
	perf_enable();
out:
	raw_spin_unlock(&ctx->lock);
}
/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}
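/*
 * (The prctl and ioctl referred to above end up in
 * perf_event_task_enable()/perf_event_task_disable() and perf_ioctl()
 * further down in this file.)
 */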
static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}
#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}
/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * not restart the event.
 */
void perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	int do_switch = 1;

	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp = next_ctx;
			next->perf_event_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
	}
}
static void task_ctx_sched_out(struct perf_event_context *ctx,
			       enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, event_type);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void __perf_event_task_sched_out(struct perf_event_context *ctx)
{
	task_ctx_sched_out(ctx, EVENT_ALL);
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}
static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type)
{
	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_events))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (event_type & EVENT_PINNED)
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (event_type & EVENT_FLEXIBLE)
		ctx_flexible_sched_in(ctx, cpuctx);

	perf_enable();
out:
	raw_spin_unlock(&ctx->lock);
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type);
}

static void task_ctx_sched_in(struct task_struct *task,
			      enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	ctx_sched_in(ctx, cpuctx, event_type);
	cpuctx->task_ctx = ctx;
}
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
void perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;

	if (cpuctx->task_ctx == ctx)
		return;

	perf_disable();

	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);

	cpuctx->task_ctx = ctx;

	perf_enable();
}
#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}
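/*
 * Worked example (sketch): @count = 2,000,000 events in @nsec = 10ms
 * with sample_freq = 1000 gives
 *
 *	period = (2e6 * 1e9) / (1e7 * 1000) = 200,000
 *
 * i.e. one sample every 200,000 events.
 */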
static void perf_event_stop(struct perf_event *event)
{
	if (!event->pmu->stop)
		return event->pmu->disable(event);

	return event->pmu->stop(event);
}

static int perf_event_start(struct perf_event *event)
{
	if (!event->pmu->start)
		return event->pmu->enable(event);

	return event->pmu->start(event);
}
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		perf_disable();
		perf_event_stop(event);
		local64_set(&hwc->period_left, 0);
		perf_event_start(event);
		perf_enable();
	}
}
static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;
	s64 delta;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			perf_disable();
			event->pmu->unthrottle(event);
			perf_enable();
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		perf_disable();
		event->pmu->read(event);
		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		if (delta > 0)
			perf_adjust_period(event, TICK_NSEC, delta);
		perf_enable();
	}
	raw_spin_unlock(&ctx->lock);
}
/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	raw_spin_lock(&ctx->lock);

	/* Rotate the first entry last of non-pinned groups */
	list_rotate_left(&ctx->flexible_groups);

	raw_spin_unlock(&ctx->lock);
}
void perf_event_task_tick(struct task_struct *curr)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	int rotate = 0;

	if (!atomic_read(&nr_events))
		return;

	cpuctx = &__get_cpu_var(perf_cpu_context);
	if (cpuctx->ctx.nr_events &&
	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
		rotate = 1;

	ctx = curr->perf_event_ctxp;
	if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
		rotate = 1;

	perf_ctx_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_ctx_adjust_freq(ctx);

	if (!rotate)
		return;

	perf_disable();
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
	perf_enable();
}
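/*
 * Net effect of the tick (sketch): when there are more flexible groups
 * than hardware counters, the rotation above cycles which groups get
 * counters, and the enabled/running times maintained elsewhere let
 * userspace scale the counts back up.
 */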
static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event, ctx);

	return 1;
}
/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	ctx = task->perf_event_ctxp;
	if (!ctx || !ctx->nr_events)
		goto out;

	__perf_event_task_sched_out(ctx);

	raw_spin_lock(&ctx->lock);

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	raw_spin_unlock(&ctx->lock);

	perf_event_task_sched_in(task);
out:
	local_irq_restore(flags);
}
/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu.  If not it has been
	 * scheduled out before the smp call arrived.  In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);
	update_event_times(event);
	raw_spin_unlock(&ctx->lock);

	event->pmu->read(event);
}
static inline u64 perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		update_context_time(ctx);
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return perf_event_count(event);
}
/*
 * Callchain support
 */

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
struct callchain_cpus_entries *callchain_cpus_entries;
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	rcu_assign_pointer(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
		num_possible_cpus();

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}
static int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;

		goto exit;
	}

	err = alloc_callchain_buffers();
	if (err)
		release_callchain_buffers();
exit:
	mutex_unlock(&callchain_mutex);

	return err;
}

static void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}
static int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}
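/*
 * There is one recursion slot per context level (task, softirq, hardirq,
 * NMI), so e.g. an NMI hitting while a task-level callchain is being
 * captured gets its own perf_callchain_entry instead of corrupting the
 * one already in flight.
 */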
static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}

	if (regs) {
		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}
/*
 * Initialize the perf_event context in a task_struct:
 */
static void
__perf_event_init_context(struct perf_event_context *ctx,
			  struct task_struct *task)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}
static struct perf_event_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	struct task_struct *task;
	unsigned long flags;
	int err;

	if (pid == -1 && cpu != -1) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu >= nr_cpumask_bits)
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow attaching an event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach events to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

retry:
	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_event_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

errout:
	put_task_struct(task);
	return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}

static void perf_pending_sync(struct perf_event *event);
static void perf_buffer_put(struct perf_buffer *buffer);

static void free_event(struct perf_event *event)
{
	perf_pending_sync(event);

	if (!event->parent) {
		atomic_dec(&nr_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
	}

	if (event->buffer) {
		perf_buffer_put(event->buffer);
		event->buffer = NULL;
	}

	if (event->destroy)
		event->destroy(event);

	put_ctx(event->ctx);
	call_rcu(&event->rcu_head, free_event_rcu);
}
int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	/*
	 * Remove from the PMU, can't get re-enabled since we got
	 * here because the last ref went.
	 */
	perf_event_disable(event);

	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&event->owner->perf_event_mutex);
	list_del_init(&event->owner_entry);
	mutex_unlock(&event->owner->perf_event_mutex);
	put_task_struct(event->owner);

	free_event(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;

	file->private_data = NULL;

	return perf_event_release_kernel(event);
}
static int perf_event_read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;

	return size;
}
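/*
 * E.g. (sketch): with PERF_FORMAT_GROUP | PERF_FORMAT_ID on a leader
 * with two siblings, the read() layout is
 *
 *	{ u64 nr; { u64 value, id; } cnt[3]; }
 *
 * i.e. sizeof(u64) + 3 * 2 * sizeof(u64) = 56 bytes.
 */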
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;

	*enabled = 0;
	*running = 0;

	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
static int perf_event_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
	u64 values[5];
	u64 count, enabled, running;

	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		goto unlock;

	ret = size;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {
			ret = -EFAULT;
			goto unlock;
		}

		ret += size;
	}
unlock:
	mutex_unlock(&ctx->mutex);

	return ret;
}
static int perf_event_read_one(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}
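/*
 * Matching user-space side (sketch), assuming an fd obtained from
 * sys_perf_event_open() with read_format == PERF_FORMAT_ID:
 *
 *	u64 values[2];	// { value, id }
 *	read(fd, values, sizeof(values));
 */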
/*
 * Read the performance event - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < perf_event_read_size(event))
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_event *event = file->private_data;

	return perf_read_hw(event, buf, count);
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct perf_buffer *buffer;
	unsigned int events = POLL_HUP;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (buffer)
		events = atomic_xchg(&buffer->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &event->waitq, wait);

	return events;
}
static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	local64_set(&event->count, 0);
	perf_event_update_userpage(event);
}
/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}

static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	event = event->group_leader;

	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	int ret = 0;
	u64 value;

	if (!event->attr.sample_period)
		return -EINVAL;

	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	raw_spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	raw_spin_unlock_irq(&ctx->lock);

	return ret;
}
static const struct file_operations perf_fops;

static struct perf_event *perf_fget_light(int fd, int *fput_needed)
{
	struct file *file;

	file = fget_light(fd, fput_needed);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &perf_fops) {
		fput_light(file, *fput_needed);
		*fput_needed = 0;
		return ERR_PTR(-EBADF);
	}

	return file->private_data;
}
static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;

	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);

	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);

	case PERF_EVENT_IOC_SET_OUTPUT:
	{
		struct perf_event *output_event = NULL;
		int fput_needed = 0;
		int ret;

		if (arg != -1) {
			output_event = perf_fget_light(arg, &fput_needed);
			if (IS_ERR(output_event))
				return PTR_ERR(output_event);
		}

		ret = perf_event_set_output(event, output_event);
		if (output_event)
			fput_light(output_event->filp, fput_needed);

		return ret;
	}

	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);

	return 0;
}
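/*
 * Illustrative user-space sketch (not part of this file): driving the
 * ioctls above on an event fd obtained from a prior perf_event_open().
 * The helper name reset_and_enable_group() is hypothetical.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/perf_event.h>
 *
 *	static int reset_and_enable_group(int fd)
 *	{
 *		__u64 period = 100000;
 *
 *		// Reset every event in the group, per PERF_IOC_FLAG_GROUP.
 *		if (ioctl(fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP) < 0)
 *			return -1;
 *		// PERF_EVENT_IOC_PERIOD takes a pointer to a u64,
 *		// see perf_event_period() above.
 *		if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period) < 0)
 *			return -1;
 *		return ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *	}
 */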
int perf_event_task_enable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

int perf_event_task_disable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}
#ifndef PERF_EVENT_INDEX_OFFSET
# define PERF_EVENT_INDEX_OFFSET 0
#endif

static int perf_event_index(struct perf_event *event)
{
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
}
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct perf_buffer *buffer;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto unlock;

	userpg = buffer->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = perf_event_count(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		userpg->offset -= local64_read(&event->hw.prev_count);

	userpg->time_enabled = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);

	userpg->time_running = event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
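/*
 * User-space counterpart, a minimal sketch (not part of this file):
 * read the self-monitoring data published above.  'pc' is the first
 * mmap()ed page of the event; 'lock' is bumped around each update, so
 * retry on a torn read.  Assumes the mmap-page layout of this kernel
 * version; a complete count would also fold in the hardware counter
 * via rdpmc when pc->index is non-zero.
 *
 *	#include <linux/perf_event.h>
 *
 *	static __u64 read_user_count(volatile struct perf_event_mmap_page *pc)
 *	{
 *		__u32 seq;
 *		__s64 count;
 *
 *		do {
 *			seq = pc->lock;
 *			__sync_synchronize();	// pairs with the kernel barrier()
 *			count = pc->offset;
 *			__sync_synchronize();
 *		} while (pc->lock != seq);
 *
 *		return count;
 *	}
 */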
static unsigned long perf_data_size(struct perf_buffer *buffer);

static void
perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
{
	long max_size = perf_data_size(buffer);

	if (watermark)
		buffer->watermark = min(max_size, watermark);

	if (!buffer->watermark)
		buffer->watermark = max_size / 2;

	if (flags & PERF_BUFFER_WRITABLE)
		buffer->writable = 1;

	atomic_set(&buffer->refcount, 1);
}
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
{
	if (pgoff > buffer->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(buffer->user_page);

	return virt_to_page(buffer->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}
static struct perf_buffer *
perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *buffer;
	unsigned long size;
	int i;

	size = sizeof(struct perf_buffer);
	size += nr_pages * sizeof(void *);

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto fail;

	buffer->user_page = perf_mmap_alloc_page(cpu);
	if (!buffer->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!buffer->data_pages[i])
			goto fail_data_pages;
	}

	buffer->nr_pages = nr_pages;

	perf_buffer_init(buffer, watermark, flags);

	return buffer;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)buffer->data_pages[i]);

	free_page((unsigned long)buffer->user_page);

fail_user_page:
	kfree(buffer);

fail:
	return NULL;
}
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

static void perf_buffer_free(struct perf_buffer *buffer)
{
	int i;

	perf_mmap_free_page((unsigned long)buffer->user_page);
	for (i = 0; i < buffer->nr_pages; i++)
		perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
	kfree(buffer);
}

static inline int page_order(struct perf_buffer *buffer)
{
	return 0;
}
#else

/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *buffer)
{
	return buffer->page_order;
}

static struct page *
perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(buffer)))
		return NULL;

	return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}
static void perf_buffer_free_work(struct work_struct *work)
{
	struct perf_buffer *buffer;
	void *base;
	int i, nr;

	buffer = container_of(work, struct perf_buffer, work);
	nr = 1 << page_order(buffer);

	base = buffer->user_page;
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(buffer);
}

static void perf_buffer_free(struct perf_buffer *buffer)
{
	schedule_work(&buffer->work);
}
static struct perf_buffer *
perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *buffer;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct perf_buffer);
	size += sizeof(void *);

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto fail;

	INIT_WORK(&buffer->work, perf_buffer_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	buffer->user_page = all_buf;
	buffer->data_pages[0] = all_buf + PAGE_SIZE;
	buffer->page_order = ilog2(nr_pages);
	buffer->nr_pages = 1;

	perf_buffer_init(buffer, watermark, flags);

	return buffer;

fail_all_buf:
	kfree(buffer);

fail:
	return NULL;
}

#endif
static unsigned long perf_data_size(struct perf_buffer *buffer)
{
	return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
}
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
	struct perf_buffer *buffer;
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto unlock;

	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;

	vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
	if (!vmf->page)
		goto unlock;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}
static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *buffer;

	buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
	perf_buffer_free(buffer);
}

static struct perf_buffer *perf_buffer_get(struct perf_event *event)
{
	struct perf_buffer *buffer;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (buffer) {
		if (!atomic_inc_not_zero(&buffer->refcount))
			buffer = NULL;
	}
	rcu_read_unlock();

	return buffer;
}

static void perf_buffer_put(struct perf_buffer *buffer)
{
	if (!atomic_dec_and_test(&buffer->refcount))
		return;

	call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
}
static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	atomic_inc(&event->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
		unsigned long size = perf_data_size(event->buffer);
		struct user_struct *user = event->mmap_user;
		struct perf_buffer *buffer = event->buffer;

		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= event->mmap_locked;
		rcu_assign_pointer(event->buffer, NULL);
		mutex_unlock(&event->mmap_mutex);

		perf_buffer_put(buffer);
		free_uid(user);
	}
}
static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	struct perf_buffer *buffer;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0, flags = 0;

	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
	 * same buffer.
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have buffer pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
	if (event->buffer) {
		if (event->buffer->nr_pages == nr_pages)
			atomic_inc(&event->buffer->refcount);
		else
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(event->buffer);

	if (vma->vm_flags & VM_WRITE)
		flags |= PERF_BUFFER_WRITABLE;

	buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
				   event->cpu, flags);
	if (!buffer) {
		ret = -ENOMEM;
		goto unlock;
	}
	rcu_assign_pointer(event->buffer, buffer);

	atomic_long_add(user_extra, &user->locked_vm);
	event->mmap_locked = extra;
	event->mmap_user = get_current_user();
	vma->vm_mm->locked_vm += event->mmap_locked;

unlock:
	if (!ret)
		atomic_inc(&event->mmap_count);
	mutex_unlock(&event->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
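/*
 * Matching user-space sketch (not part of this file): the mapping must
 * be MAP_SHARED and span 1 + 2^n pages (one control page plus a
 * power-of-two data area), or the size checks above reject it.  The
 * helper name map_perf_buffer() is hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static void *map_perf_buffer(int fd, int data_pages)	// power of two
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		size_t len = (size_t)(1 + data_pages) * page;
 *
 *		return mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 *	}
 */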
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_event *event = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.llseek			= no_llseek,
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};
/*
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_event_wakeup(struct perf_event *event)
{
	wake_up_all(&event->waitq);

	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}

/*
 * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */

static void perf_pending_event(struct perf_pending_entry *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);

	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
}
#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_event_pending();

	put_cpu_var(perf_pending_head);
}
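/*
 * The same lockless push, sketched stand-alone with GCC builtins (not
 * part of this file): claim the node by swinging ->next from NULL to a
 * sentinel, then publish it at the head with a cmpxchg loop.  Names
 * here (node, push, TAIL) are hypothetical.
 *
 *	struct node { struct node *next; };
 *	#define TAIL ((struct node *)-1UL)
 *
 *	static int push(struct node **head, struct node *n)
 *	{
 *		struct node *old;
 *
 *		if (!__sync_bool_compare_and_swap(&n->next, NULL, TAIL))
 *			return 0;	// already queued
 *		do {
 *			old = *head;
 *			n->next = old;
 *		} while (!__sync_bool_compare_and_swap(head, old, n));
 *		return 1;
 *	}
 */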
static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}

static inline int perf_not_pending(struct perf_event *event)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see perf_pending_handle()
	 */
	smp_rmb();
	return event->pending.next == NULL;
}

static void perf_pending_sync(struct perf_event *event)
{
	wait_event(event->waitq, perf_not_pending(event));
}

void perf_event_do_pending(void)
{
	__perf_pending_run();
}
/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!buffer->writable)
		return true;

	mask = perf_data_size(buffer) - 1;

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}
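/*
 * Worked example of the space check above, assuming a 16-page (64KiB)
 * data area: mask = 0xffff.  With tail = 0x100, offset = 0xff80 and a
 * proposed head = 0x10040, the normalized values are offset - tail =
 * 0xfe80 and head - tail = 0xff40, so head - offset = 0xc0 >= 0 and
 * the write fits without overwriting data user-space has not yet
 * consumed.
 */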
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->buffer->poll, POLL_IN);

	if (handle->nmi) {
		handle->event->pending_wakeup = 1;
		perf_pending_queue(&handle->event->pending,
				   perf_pending_event);
	} else
		perf_event_wakeup(handle->event);
}
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *buffer = handle->buffer;

	preempt_disable();
	local_inc(&buffer->nest);
	handle->wakeup = local_read(&buffer->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *buffer = handle->buffer;
	unsigned long head;

again:
	head = local_read(&buffer->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&buffer->nest))
		goto out;

	/*
	 * Publish the known good head. Rely on the full barrier implied
	 * by atomic_dec_and_test() to order the buffer->head read and this
	 * write.
	 */
	buffer->user_page->data_head = head;

	/*
	 * Now check if we missed an update, rely on the (compiler)
	 * barrier in atomic_dec_and_test() to re-read buffer->head.
	 */
	if (unlikely(head != local_read(&buffer->head))) {
		local_inc(&buffer->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&buffer->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
__always_inline void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct perf_buffer *buffer = handle->buffer;

			handle->page++;
			handle->page &= buffer->nr_pages - 1;
			handle->addr = buffer->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(buffer);
		}
	} while (len);
}
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size,
		      int nmi, int sample)
{
	struct perf_buffer *buffer;
	unsigned long tail, offset, head;
	int have_lost;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto out;

	handle->buffer	= buffer;
	handle->event	= event;
	handle->nmi	= nmi;
	handle->sample	= sample;

	if (!buffer->nr_pages)
		goto out;

	have_lost = local_read(&buffer->lost);
	if (have_lost)
		size += sizeof(lost_event);

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer. So that all reads will be completed before the
		 * write is issued.
		 */
		tail = ACCESS_ONCE(buffer->user_page->data_tail);
		smp_rmb();
		offset = head = local_read(&buffer->head);
		head += size;
		if (unlikely(!perf_output_space(buffer, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&buffer->head, offset, head) != offset);

	if (head - local_read(&buffer->wakeup) > buffer->watermark)
		local_add(buffer->watermark, &buffer->wakeup);

	handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
	handle->page &= buffer->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
	handle->addr = buffer->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.header.size = sizeof(lost_event);
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&buffer->lost, 0);

		perf_output_put(handle, lost_event);
	}

	return 0;

fail:
	local_inc(&buffer->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct perf_buffer *buffer = handle->buffer;

	int wakeup_events = event->attr.wakeup_events;

	if (handle->sample && wakeup_events) {
		int events = local_inc_return(&buffer->events);
		if (events >= wakeup_events) {
			local_sub(wakeup_events, &buffer->events);
			local_inc(&buffer->wakeup);
		}
	}

	perf_output_put_handle(handle);
	rcu_read_unlock();
}
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}
static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = event->total_time_running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	perf_output_copy(handle, values, n * sizeof(u64));
}
/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = leader->total_time_enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = leader->total_time_running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	perf_output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		perf_output_copy(handle, values, n * sizeof(u64));
	}
}
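/*
 * For reference, the group read layout emitted above (per the perf ABI):
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           }	&& PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	}
 *
 * i.e. the leader's value/id pair first, then one pair per sibling.
 */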
static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event);
	else
		perf_output_read_one(handle, event);
}
void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			perf_output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			perf_output_copy(handle, data->raw->data,
					 data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}
}
void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header);

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	if (sample_type & PERF_SAMPLE_IP) {
		data->ip = perf_instruction_pointer(regs);

		header->size += sizeof(data->ip);
	}

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);

		header->size += sizeof(data->tid_entry);
	}

	if (sample_type & PERF_SAMPLE_TIME) {
		data->time = perf_clock();

		header->size += sizeof(data->time);
	}

	if (sample_type & PERF_SAMPLE_ADDR)
		header->size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_ID) {
		data->id = primary_event_id(event);

		header->size += sizeof(data->id);
	}

	if (sample_type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = event->id;

		header->size += sizeof(data->stream_id);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu		= raw_smp_processor_id();
		data->cpu_entry.reserved	= 0;

		header->size += sizeof(data->cpu_entry);
	}

	if (sample_type & PERF_SAMPLE_PERIOD)
		header->size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		header->size += perf_event_read_size(event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}
}
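/*
 * Worked size example for the accounting above: with sample_type =
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME, header->size
 * ends up sizeof(struct perf_event_header) + 3 * sizeof(u64) =
 * 8 + 24 = 32 bytes (tid_entry packs two u32s into one u64 slot),
 * already u64-aligned as perf_output_begin() expects.
 */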
static void perf_event_output(struct perf_event *event, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size, nmi, 1))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}
struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + perf_event_read_size(event),
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);

	perf_output_end(&handle);
}
/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

static void perf_event_task_output(struct perf_event *event,
				     struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct task_struct *task = task_event->task;
	int size, ret;

	size  = task_event->event_id.header.size;
	ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_output_end(&handle);
}
static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.comm || event->attr.mmap ||
	    event->attr.mmap_data || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				  struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}
static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx = task_event->task_ctx;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_task_ctx(&cpuctx->ctx, task_event);
	if (!ctx)
		ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_task_ctx(ctx, task_event);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}
/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static void perf_event_comm_output(struct perf_event *event,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}
static int perf_event_comm_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
}
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	unsigned int size;
	char comm[TASK_COMM_LEN];

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_comm_ctx(ctx, comm_event);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (task->perf_event_ctxp)
		perf_event_enable_on_exec(task);

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}
/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}
static int perf_event_mmap_match(struct perf_event *event,
				   struct perf_mmap_event *mmap_event,
				   int executable)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				  struct perf_mmap_event *mmap_event,
				  int executable)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event, executable))
			perf_event_mmap_output(event, mmap_event);
	}
}
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();

	kfree(buf);
}
void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}
/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}
/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event, int nmi,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	throttle = (throttle && event->pmu->unthrottle != NULL);

	if (!throttle) {
		hwc->interrupts++;
	} else {
		if (hwc->interrupts != MAX_INTERRUPTS) {
			hwc->interrupts++;
			if (HZ * hwc->interrupts >
					(u64)sysctl_perf_event_sample_rate) {
				hwc->interrupts = MAX_INTERRUPTS;
				perf_log_throttle(event, 0);
				ret = 1;
			}
		} else {
			/*
			 * Keep re-disabling events even though on the previous
			 * pass we disabled it - just in case we raced with a
			 * sched-in and the event got enabled again:
			 */
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		if (nmi) {
			event->pending_disable = 1;
			perf_pending_queue(&event->pending,
					   perf_pending_event);
		} else
			perf_event_disable(event);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, nmi, data, regs);
	else
		perf_event_output(event, nmi, data, regs);

	return ret;
}

int perf_event_overflow(struct perf_event *event, int nmi,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, nmi, 1, data, regs);
}
/*
 * Generic software event infrastructure
 */

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
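/*
 * Worked example of the period arithmetic above: with sample_period =
 * 100 and period_left = -30 (70 events into the current interval), an
 * incoming count of 50 drives period_left to +20; nr = (100 + 20)/100
 * = 1 overflow fires, and period_left is rewound to 20 - 100 = -80
 * for the next interval.
 */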
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    int nmi, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	data->period = event->hw.last_period;
	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, nmi, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}
static void perf_swevent_add(struct perf_event *event, u64 nr,
			       int nmi, struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!hwc->sample_period)
		return;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, nmi, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, nmi, data, regs);
}
static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	return 1;
}
static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}
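/*
 * Bucket selection example: a PERF_TYPE_SOFTWARE (type 1) event with
 * config PERF_COUNT_SW_PAGE_FAULTS (2) hashes as
 * hash_64(2 | (1ULL << 32), SWEVENT_HLIST_BITS) and always lands in
 * the same head on a given CPU, which is what lets the trigger path
 * walk a single short chain under RCU.
 */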
/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(ctx->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
	hlist = rcu_dereference_protected(ctx->swevent_hlist,
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr, int nmi,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event *event;
	struct hlist_node *node;
	struct hlist_head *head;

	cpuctx = &__get_cpu_var(perf_cpu_context);

	rcu_read_lock();

	head = find_swevent_head_rcu(cpuctx, type, event_id);

	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_add(event, nr, nmi, data, regs);
	}
end:
	rcu_read_unlock();
}
int perf_swevent_get_recursion_context(void)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	return get_recursion_context(cpuctx->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

void inline perf_swevent_put_recursion_context(int rctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	put_recursion_context(cpuctx->recursion, rctx);
}

void __perf_sw_event(u32 event_id, u64 nr, int nmi,
			    struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto out;

	perf_sample_data_init(&data, addr);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);

	perf_swevent_put_recursion_context(rctx);
out:
	preempt_enable_notrace();
}
static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_cpu_context *cpuctx;
	struct hlist_head *head;

	cpuctx = &__get_cpu_var(perf_cpu_context);

	if (hwc->sample_period) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	head = find_swevent_head(cpuctx, event);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_disable(struct perf_event *event)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_void(struct perf_event *event)
{
}

static int perf_swevent_int(struct perf_event *event)
{
	return 0;
}
/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct perf_cpu_context *cpuctx)
{
	return rcu_dereference_protected(cpuctx->swevent_hlist,
					 lockdep_is_held(&cpuctx->hlist_mutex));
}

static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
{
	struct swevent_hlist *hlist;

	hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
	kfree(hlist);
}

static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);

	if (!hlist)
		return;

	rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
	call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
}
static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);

	mutex_lock(&cpuctx->hlist_mutex);

	if (!--cpuctx->hlist_refcount)
		swevent_hlist_release(cpuctx);

	mutex_unlock(&cpuctx->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}
static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	int err = 0;

	mutex_lock(&cpuctx->hlist_mutex);

	if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
	}
	cpuctx->hlist_refcount++;
exit:
	mutex_unlock(&cpuctx->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}
atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	atomic_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}

static int perf_swevent_init(struct perf_event *event)
{
	int event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		atomic_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static struct pmu perf_swevent = {
	.event_init	= perf_swevent_init,
	.enable		= perf_swevent_enable,
	.disable	= perf_swevent_disable,
	.start		= perf_swevent_int,
	.stop		= perf_swevent_void,
	.read		= perf_swevent_read,
	.unthrottle	= perf_swevent_void, /* hwc->interrupts already reset */
};
#ifdef CONFIG_EVENT_TRACING

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx)
{
	struct perf_sample_data data;
	struct perf_event *event;
	struct hlist_node *node;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_add(event, count, 1, &data, regs);
	}

	perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_destroy(event);
}

static int perf_tp_event_init(struct perf_event *event)
{
	int err;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	/*
	 * Raw tracepoint data is a severe data leak, only allow root to
	 * have these.
	 */
	if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
			perf_paranoid_tracepoint_raw() &&
			!capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = perf_trace_init(event);
	if (err)
		return err;

	event->destroy = tp_perf_event_destroy;

	return 0;
}

static struct pmu perf_tracepoint = {
	.event_init	= perf_tp_event_init,
	.enable		= perf_trace_enable,
	.disable	= perf_trace_disable,
	.start		= perf_swevent_int,
	.stop		= perf_swevent_void,
	.read		= perf_swevent_read,
	.unthrottle	= perf_swevent_void,
};

static inline void perf_tp_register(void)
{
	perf_pmu_register(&perf_tracepoint);
}
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static inline void perf_tp_register(void)
{
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr);

	if (!perf_exclude_event(bp, regs))
		perf_swevent_add(bp, 1, 1, &sample, regs);
}
#endif
/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
	event->pmu->read(event);

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && current->pid == 0))
			if (perf_event_overflow(event, 0, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}
static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;
	if (hwc->sample_period) {
		u64 period;

		if (hwc->remaining) {
			if (hwc->remaining < 0)
				period = 10000;
			else
				period = hwc->remaining;
			hwc->remaining = 0;
		} else {
			period = max_t(u64, 10000, hwc->sample_period);
		}
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->sample_period) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		hwc->remaining = ktime_to_ns(remaining);

		hrtimer_cancel(&hwc->hrtimer);
	}
}
/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static int cpu_clock_event_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int cpu = raw_smp_processor_id();

	local64_set(&hwc->prev_count, cpu_clock(cpu));
	perf_swevent_start_hrtimer(event);

	return 0;
}

static void cpu_clock_event_disable(struct perf_event *event)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}

static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}

static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

	return 0;
}

static struct pmu perf_cpu_clock = {
	.event_init	= cpu_clock_event_init,
	.enable		= cpu_clock_event_enable,
	.disable	= cpu_clock_event_disable,
	.read		= cpu_clock_event_read,
};
/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}

static int task_clock_event_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 now;

	now = event->ctx->time;

	local64_set(&hwc->prev_count, now);

	perf_swevent_start_hrtimer(event);

	return 0;
}

static void task_clock_event_disable(struct perf_event *event)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
}

static void task_clock_event_read(struct perf_event *event)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(event->ctx);
		time = event->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - event->ctx->timestamp;
		time = event->ctx->time + delta;
	}

	task_clock_event_update(event, time);
}

static int task_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

	return 0;
}

static struct pmu perf_task_clock = {
	.event_init	= task_clock_event_init,
	.enable		= task_clock_event_enable,
	.disable	= task_clock_event_disable,
	.read		= task_clock_event_read,
};
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

int perf_pmu_register(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_add_rcu(&pmu->entry, &pmus);
	mutex_unlock(&pmus_lock);

	return 0;
}

void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);

	synchronize_srcu(&pmus_srcu);
}

struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		int ret = pmu->event_init(event);
		if (!ret)
			goto unlock;

		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
			goto unlock;
		}
	}
	pmu = ERR_PTR(-ENOENT);
unlock:
	srcu_read_unlock(&pmus_srcu, idx);

	return pmu;
}
/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr,
		   int cpu,
		   struct perf_event_context *ctx,
		   struct perf_event *group_leader,
		   struct perf_event *parent_event,
		   perf_overflow_handler_t overflow_handler,
		   gfp_t gfpflags)
{
	struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	event = kzalloc(sizeof(*event), gfpflags);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	init_waitqueue_head(&event->waitq);

	mutex_init(&event->mmap_mutex);

	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->ctx		= ctx;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (!overflow_handler && parent_event)
		overflow_handler = parent_event->overflow_handler;

	event->overflow_handler	= overflow_handler;

	if (attr->disabled)
		event->state = PERF_EVENT_STATE_OFF;

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	local64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	pmu = perf_init_event(event);

done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	event->pmu = pmu;

	if (!event->parent) {
		atomic_inc(&nr_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err) {
				free_event(event);
				return ERR_PTR(err);
			}
		}
	}

	return event;
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we dont know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * If the type exists, the corresponding creation will verify
	 * the attr->config.
	 */
	if (attr->type >= PERF_TYPE_MAX)
		return -EINVAL;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
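/*
 * User-space counterpart, a minimal sketch (not part of this file):
 * zero the attr and set attr.size so older and newer kernels agree on
 * the layout, matching the forward/backward compatibility rules
 * enforced above.  The helper name init_attr() is hypothetical.
 *
 *	#include <string.h>
 *	#include <linux/perf_event.h>
 *
 *	static void init_attr(struct perf_event_attr *attr,
 *			      __u32 type, __u64 config)
 *	{
 *		memset(attr, 0, sizeof(*attr));
 *		attr->size = sizeof(*attr);	// kernel checks the tail is zero
 *		attr->type = type;
 *		attr->config = config;
 *	}
 */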
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct perf_buffer *buffer = NULL, *old_buffer = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If it's not a per-cpu buffer, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	if (output_event) {
		/* get the buffer we want to redirect to */
		buffer = perf_buffer_get(output_event);
		if (!buffer)
			goto unlock;
	}

	old_buffer = event->buffer;
	rcu_assign_pointer(event->buffer, buffer);
	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);

	if (old_buffer)
		perf_buffer_put(old_buffer);
out:
	return ret;
}
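/*
 * Illustrative user-space sketch (not part of this file): the
 * PERF_FLAG_FD_OUTPUT path into perf_event_set_output() lets a second
 * event log into its group leader's mmap()ed buffer, subject to the
 * same-cpu/same-task checks above.
 *
 *	int leader  = syscall(__NR_perf_event_open, &attr1, pid, cpu, -1, 0);
 *	int sibling = syscall(__NR_perf_event_open, &attr2, pid, cpu,
 *			      leader, PERF_FLAG_FD_OUTPUT);
 *
 * Only 'leader' then needs to be mmap()ed; samples from both events
 * appear in the one buffer.
 */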
/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:	target pid
 * @cpu:	target cpu
 * @group_fd:	group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *event, *group_leader = NULL, *output_event = NULL;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	int event_fd;
	int fput_needed = 0;
	int err;

	/* for future expandability... */
	if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	event_fd = get_unused_fd_flags(O_RDWR);
	if (event_fd < 0)
		return event_fd;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_fd;
	}

	if (group_fd != -1) {
		group_leader = perf_fget_light(group_fd, &fput_needed);
		if (IS_ERR(group_leader)) {
			err = PTR_ERR(group_leader);
			goto err_put_context;
		}
		group_file = group_leader->filp;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_put_context;
	}

	event = perf_event_alloc(&attr, cpu, ctx, group_leader,
				 NULL, NULL, GFP_KERNEL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_put_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_free_put_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_free_put_context;
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fput_light(group_file, fput_needed);
	fd_install(event_fd, event_file);
	return event_fd;

err_free_put_context:
	free_event(event);
err_put_context:
	fput_light(group_file, fput_needed);
	put_ctx(ctx);
err_fd:
	put_unused_fd(event_fd);
	return err;
}
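/*
 * Illustrative user-space sketch (not part of this file): minimal use
 * of the syscall defined above - count cycles in the current task and
 * read the 64-bit total back over the event fd.
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.disabled = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	.. run the workload ..
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */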
/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @pid: task to profile
 * @overflow_handler: callback to trigger when we hit the event
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 pid_t pid,
				 perf_overflow_handler_t overflow_handler)
{
	struct perf_event *event;
	struct perf_event_context *ctx;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_exit;
	}

	event = perf_event_alloc(attr, cpu, ctx, NULL,
				 NULL, overflow_handler, GFP_KERNEL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_put_context;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	return event;

err_put_context:
	put_ctx(ctx);
err_exit:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
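/*
 * Illustrative in-kernel sketch (hypothetical caller, not part of this
 * file): how a subsystem might use the export above to bind a counter
 * to a CPU with a private overflow callback. The handler prototype is
 * assumed to match this tree's perf_overflow_handler_t.
 *
 *	static void my_overflow(struct perf_event *event, int nmi,
 *				struct perf_sample_data *data,
 *				struct pt_regs *regs)
 *	{
 *		.. react to the counter crossing its sample period ..
 *	}
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, -1,
 *						 my_overflow);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */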
/*
 * inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu, child_ctx,
				       group_leader, parent_event,
				       NULL, GFP_KERNEL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->overflow_handler = parent_event->overflow_handler;

	/*
	 * Link it up in the child's context:
	 */
	add_event_to_ctx(child_event, child_ctx);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}
static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}
static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	struct perf_event *parent_event;

	perf_event_remove_from_context(child_event);

	parent_event = child_event->parent;
	/*
	 * It can happen that parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped - but otherwise linger.
	 */
	if (parent_event) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}
/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp)) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_event_ctxp;
	__perf_event_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	child->perf_event_ctxp = NULL;
	/*
	 * If this context is a clone; unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}
/*
 * Free an unexposed, unused context as created by inheritance by
 * init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event *event, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		perf_free_event(event, ctx);

	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				 group_entry)
		perf_free_event(event, ctx);

	if (!list_empty(&ctx->pinned_groups) ||
	    !list_empty(&ctx->flexible_groups))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx = child->perf_event_ctxp;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = kzalloc(sizeof(struct perf_event_context),
				    GFP_KERNEL);
		if (!child_ctx)
			return -ENOMEM;

		__perf_event_init_context(child_ctx, child);
		child->perf_event_ctxp = child_ctx;
		get_task_struct(child);
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_event_ctxp = NULL;

	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	if (likely(!parent->perf_event_ctxp))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	child_ctx = child->perf_event_ctxp;

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of events and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}
static void __init perf_event_init_all_cpus(void)
{
	int cpu;
	struct perf_cpu_context *cpuctx;

	for_each_possible_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		mutex_init(&cpuctx->hlist_mutex);
		__perf_event_init_context(&cpuctx->ctx, NULL);
	}
}
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	mutex_lock(&cpuctx->hlist_mutex);
	if (cpuctx->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		WARN_ON_ONCE(!hlist);
		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
	}
	mutex_unlock(&cpuctx->hlist_mutex);
}
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_event_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_event *event, *tmp;

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_event_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_event_remove_from_context(event);
}

static void perf_event_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = &cpuctx->ctx;

	mutex_lock(&cpuctx->hlist_mutex);
	swevent_hlist_release(cpuctx);
	mutex_unlock(&cpuctx->hlist_mutex);

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
void __init perf_event_init(void)
{
	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent);
	perf_pmu_register(&perf_cpu_clock);
	perf_pmu_register(&perf_task_clock);

	perf_cpu_notifier(perf_cpu_notify);
}
static ssize_t
perf_show_reserve_percpu(struct sysdev_class *class,
			 struct sysdev_class_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			struct sysdev_class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_events)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		raw_spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
			  perf_max_events - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		raw_spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}

static ssize_t
perf_show_overcommit(struct sysdev_class *class,
		     struct sysdev_class_attribute *attr,
		     char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class,
		    struct sysdev_class_attribute *attr,
		    const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_events",
};

static int __init perf_event_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_event_sysfs_init);
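/*
 * With the attribute group registered above on the cpu sysdev class,
 * the two knobs should appear under /sys/devices/system/cpu/perf_events/
 * (path assumes the standard cpu sysdev location), e.g.:
 *
 *	echo 2 > /sys/devices/system/cpu/perf_events/reserve_percpu
 *	echo 0 > /sys/devices/system/cpu/perf_events/overcommit
 */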