/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
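
/*
 * Note: for PERF_TYPE_HW_CACHE events the generic config word packs the
 * cache type, operation and result into consecutive bytes, so (purely for
 * illustration) an L1D read miss is encoded roughly as:
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * which is what the shifts below unpack before indexing the cache map.
 */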
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	if (event->attr.type >= PERF_TYPE_MAX)
		return armpmu_map_raw_event(raw_event_mask, config);

	return -ENOENT;
}
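
/*
 * The counter counts upwards, so it is seeded with -left (truncated to the
 * counter width) and overflows after roughly "left" more events. As a
 * worked example, with a 32-bit counter and left == 1000 the counter is
 * written with 0xfffffc18.
 */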
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* The period may have been changed by PERF_EVENT_IOC_PERIOD */
	if (unlikely(period != hwc->last_period))
		left = period - (hwc->last_period - left);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
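
/*
 * Fold the current hardware count into the event. prev_count is advanced
 * with a cmpxchg and the read retried on failure so that a concurrent
 * update (e.g. from the overflow IRQ) is not lost; masking the delta with
 * max_period keeps the result correct when the counter has wrapped.
 */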
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}
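
/*
 * add/del are the perf core's hooks for scheduling an event onto (or off)
 * a hardware counter; start/stop merely enable or disable counting while
 * the counter index stays allocated.
 */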
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
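
/*
 * Group validation: all events in a group must be schedulable on the PMU
 * at the same time. This is checked against a fake PMU whose used_mask
 * starts out empty, asking the back-end for a counter index for each
 * group member in turn.
 */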
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (is_software_event(event))
		return 1;

	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
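
/*
 * The time spent in the overflow handler is fed back to the perf core via
 * perf_sample_event_took(), which lets it throttle the sampling rate if
 * interrupt handling starts eating too much CPU time.
 */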
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
	struct platform_device *plat_device = armpmu->plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
	int ret;
	u64 start_clock, finish_clock;

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, dev);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}
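
/*
 * The PMU IRQs and the runtime PM reference are held only while at least
 * one event exists: active_events is the reference count and reserve_mutex
 * serialises the first reserve against the last release.
 */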
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	    |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}

#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};
static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
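
/*
 * A CPU PMU back-end fills in an arm_pmu and hands it to armpmu_register().
 * Purely for illustration (the names here are hypothetical):
 *
 *	cpu_pmu->name = "armv7_cortex_a9";
 *	armpmu_register(cpu_pmu, -1);
 *
 * where a negative type asks perf_pmu_register() to allocate a dynamic
 * PMU type for the new PMU.
 */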
/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->ARM_pc);
	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else if (user_mode(regs))
		misc |= PERF_RECORD_MISC_USER;
	else
		misc |= PERF_RECORD_MISC_KERNEL;

	return misc;
}