/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002
#define BHRB_PREDICTION		0x0000000000000001
#define BHRB_EA			0xFFFFFFFFFFFFFFFC
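/*
 * As used by power_pmu_bhrb_read() below, a raw BHRB entry appears to
 * pack three fields: bit 0 is the prediction flag (BHRB_PREDICTION),
 * bit 1 marks a target-address entry (BHRB_TARGET), and the remaining
 * bits (BHRB_EA) hold the 4-byte-aligned effective address.
 */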
struct cpu_hw_events {
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	unsigned long mmcr[3];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int group_flag;

	u64 bhrb_filter;	/* BHRB HW branch filter */

	struct perf_branch_stack bhrb_stack;
	struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct power_pmu *ppmu;
/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#define MMCR0_PMCjCE		MMCR0_PMCnCE
#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)

static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)

static inline void perf_read_regs(struct pt_regs *regs)

static inline int perf_intr_is_nmi(struct pt_regs *regs)

static inline int siar_valid(struct pt_regs *regs)

#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
{
	return !!(regs->result & 1);
}
/*
 * Things that are specific to 64-bit implementations.
 */

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}
/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync;

	if (ppmu->flags & PPMU_SIAR_VALID)
		sdsync = POWER7P_MMCRA_SDAR_VALID;
	else if (ppmu->flags & PPMU_ALT_SIPR)
		sdsync = POWER6_MMCRA_SDSYNC;
	else
		sdsync = MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}
static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIHV);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}
static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIPR);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}
static bool regs_no_sipr(struct pt_regs *regs)
{
	return !!(regs->result & 2);
}
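/*
 * regs->result is used as a small bit-field here: bit 0 (tested by
 * regs_use_siar() above) says whether SIAR rather than pt_regs should
 * be used, and bit 1 (tested just above) caches that SIPR is not
 * available.  Both bits are presumably filled in by perf_read_regs()
 * further down.
 */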
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results.
	 */
	if (regs_no_sipr(regs)) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (siar >= PAGE_OFFSET)
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs))
		return PERF_RECORD_MISC_USER;

	if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	return PERF_RECORD_MISC_KERNEL;
}
/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->dar to store SIER if we have it.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;

	if (ppmu->flags & PPMU_NO_SIPR)
		regs->result |= 2;

	/*
	 * On power8 if we're in random sampling mode, the SIER is updated.
	 * If we're in continuous sampling mode, we don't have SIPR.
	 */
	if (ppmu->flags & PPMU_HAS_SIER) {
		regs->dar = mfspr(SPRN_SIER);
	}

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!regs_no_sipr(regs) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result |= use_siar;
}
/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
		return mmcra & POWER7P_MMCRA_SIAR_VALID;

	return 1;
}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);

void perf_event_print_debug(void)
/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
		val = mfspr(SPRN_PMC1);
		val = mfspr(SPRN_PMC2);
		val = mfspr(SPRN_PMC3);
		val = mfspr(SPRN_PMC4);
		val = mfspr(SPRN_PMC5);
		val = mfspr(SPRN_PMC6);
		val = mfspr(SPRN_PMC7);
		val = mfspr(SPRN_PMC8);
#endif /* CONFIG_PPC64 */
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
static void write_pmc(int idx, unsigned long val)
		mtspr(SPRN_PMC1, val);
		mtspr(SPRN_PMC2, val);
		mtspr(SPRN_PMC3, val);
		mtspr(SPRN_PMC4, val);
		mtspr(SPRN_PMC5, val);
		mtspr(SPRN_PMC6, val);
		mtspr(SPRN_PMC7, val);
		mtspr(SPRN_PMC8, val);
#endif /* CONFIG_PPC64 */
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	value = mask = nv = 0;

	/* we're backtracking, restore context */

	/*
	 * See if any alternative k for event_id i,
	 * where k > j, will satisfy the constraints.
	 */
	while (++j < n_alt[i]) {
		nv = (value | cpuhw->avalues[i][j]) +
			(value & cpuhw->avalues[i][j] & addf);
		if ((((nv + tadd) ^ value) & mask) == 0 &&
		    (((nv + tadd) ^ cpuhw->avalues[i][j])
		     & cpuhw->amasks[i][j]) == 0)
			break;
	}

	/*
	 * No feasible alternative, backtrack
	 * to event_id i-1 and continue enumerating its
	 * alternatives from where we got up to.
	 */

	/*
	 * Found a feasible alternative for event_id i,
	 * remember where we got up to with this event_id,
	 * go on to the next event_id, and start with
	 * the first alternative for it.
	 */
	mask |= cpuhw->amasks[i][j];

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;

	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	for (i = 0; i < n; ++i)
		if (cflags[i] & PPMU_LIMITED_PMC_OK)
			cflags[i] |= PPMU_LIMITED_PMC_REQD;
static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over.  If a counter is
	 * rolled back, it will be smaller, but within 256, which is the maximum
	 * number of events to rollback at once.  If we detect a rollback
	 * return 0.  This can lead to a small lack of precision in the
	 * counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}
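/*
 * Worked example: with prev = 0xfffffff0 and val = 0x10 the masked
 * subtraction in check_and_compute_delta() wraps in 32 bits and yields
 * 0x20, i.e. the counter advanced past 2^32.  With prev = 0x100 and
 * val = 0x80 the counter went backwards by 128, which is within the
 * 256-event rollback window, so 0 is returned instead.
 */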
static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}
/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}
/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
	}
	local_irq_restore(flags);
}
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned int hwc_index[MAX_HWEVENTS];

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_events == 0)
			ppc_set_pmu_inuse(0);
	}

	/*
	 * Compute MMCR* values for the new set of events
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first event.
	 * We have already checked that all events have the
	 * same values for these bits as the first event.
	 */
	event = cpuhw->event[0];
	if (event->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (event->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_events_kernel;
	if (event->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
			  | MMCR0_FC);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
		}
		if (event->hw.sample_period) {
			left = local64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		local64_set(&event->hw.prev_count, val);
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

	if (cpuhw->bhrb_users)
		ppmu->config_bhrb(cpuhw->bhrb_filter);

	local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	struct perf_event *event;

	if (!is_software_event(group)) {
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
}
/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
	asm volatile(PPC_CLRBHRB);
}
void power_pmu_bhrb_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	/* Clear BHRB if we changed task context to avoid data leaks */
	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
		power_pmu_bhrb_reset();
		cpuhw->bhrb_context = event->ctx;
	}
}
void power_pmu_bhrb_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	WARN_ON_ONCE(cpuhw->bhrb_users < 0);

	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
		/* BHRB cannot be turned off when other
		 * events are active on the PMU.
		 */

		/* avoid stale pointer */
		cpuhw->bhrb_context = NULL;
	}
}
/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time(->commit_txn) as a whole
	 */
	if (cpuhw->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];

	if (has_branch_stack(event))
		power_pmu_bhrb_enable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = &__get_cpu_var(cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
			}
			perf_event_update_userpage(event);
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	if (has_branch_stack(event))
		power_pmu_bhrb_disable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * POWER-PMU does not support disabling individual counters, hence
 * program their cycle counter to their max value and ignore the interrupts.
 */

static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}
/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;

	cpuhw = &__get_cpu_var(cpu_hw_events);
	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
/* Called from ctxsw to prevent one process's branch entries from
 * mingling with the other process's entries during context switch.
 */
void power_pmu_flush_branch_stack(void)
{
	if (ppmu->bhrb_nr)
		power_pmu_bhrb_reset();
}
/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}
/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
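/*
 * The config layout above mirrors the generic perf cache-event
 * encoding: the cache level in bits 0-7, the operation in bits 8-15
 * and the result kind in bits 16-23.  For example, config == 0 selects
 * (L1D, READ, ACCESS), i.e. L1 data-cache read accesses.
 */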
static int power_pmu_event_init(struct perf_event *event)
{
	unsigned long flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	struct cpu_hw_events *cpuhw;

	if (has_branch_stack(event)) {
		/* PMU has BHRB enabled */
		if (!(ppmu->flags & PPMU_BHRB))
			return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return -ENOENT;
	}

	event->hw.config_base = ev;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	if (event->attach_state & PERF_ATTACH_TASK)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
	}
	if (check_excludes(ctrs, cflags, n, 1))
		return -EINVAL;

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);

	if (has_branch_stack(event)) {
		cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
					event->attr.branch_sample_type);

		if (cpuhw->bhrb_filter == -1)
			return -EOPNOTSUPP;
	}

	put_cpu_var(cpu_hw_events);

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;
}
static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}
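/*
 * hw.idx is the 1-based PMC number the event currently occupies, or 0
 * when it is not resident on a counter, so power_pmu_event_idx() above
 * simply reports that number back to the generic code.
 */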
ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
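/*
 * The ->show routine above backs the per-event sysfs attributes; each
 * one is expected to read back as a single "event=0x.." line that perf
 * tooling can parse as a raw event code (the exact sysfs path depends
 * on how the attribute group is registered).
 */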
struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
	.flush_branch_stack	= power_pmu_flush_branch_stack,
};
/* Processing BHRB entries */
void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
{
	int r_index, u_index, target, pred;

	while (r_index < ppmu->bhrb_nr) {
		/* Assembly read function */
		val = read_bhrb(r_index);

		/* Terminal marker: End of valid BHRB entries */

		/* BHRB field break up */
		addr = val & BHRB_EA;
		pred = val & BHRB_PREDICTION;
		target = val & BHRB_TARGET;

		/* Probable Missed entry: Not applicable for POWER8 */
		if ((addr == 0) && (target == 0) && (pred == 1)) {

		/* Real Missed entry: Power8 based missed entry */
		if ((addr == 0) && (target == 1) && (pred == 1)) {

		/* Reserved condition: Not a valid entry */
		if ((addr == 0) && (target == 1) && (pred == 0)) {

		/* Is a target address */
		if (val & BHRB_TARGET) {
			/* First address cannot be a target address */

			/* Update target address for the previous entry */
			cpuhw->bhrb_entries[u_index - 1].to = addr;
			cpuhw->bhrb_entries[u_index - 1].mispred = pred;
			cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;

			/* Don't increment u_index */
		}

		/* Update address, flags for current entry */
		cpuhw->bhrb_entries[u_index].from = addr;
		cpuhw->bhrb_entries[u_index].mispred = pred;
		cpuhw->bhrb_entries[u_index].predicted = ~pred;

		/* Successfully populated one entry */
	}

	cpuhw->bhrb_stack.nr = u_index;
}
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	left = local64_read(&event->hw.period_left) - delta;

	record = siar_valid(regs);
	event->hw.last_period = event->hw.sample_period;

	if (left < 0x80000000LL)
		val = 0x80000000LL - left;

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
			struct cpu_hw_events *cpuhw;
			cpuhw = &__get_cpu_var(cpu_hw_events);
			power_pmu_bhrb_read(cpuhw);
			data.br_stack = &cpuhw->bhrb_stack;
		}

		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	}
}
/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}
/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;		// no valid instruction pointer
	else
		return regs->nip;
}
static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}
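/*
 * For example, val == 0x7fffff80 gives 0x80000000 - val == 128, within
 * the 256-cycle window above, so pmc_overflow_power7() reports it as an
 * overflow even though the counter's MSB is not yet set.
 */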
static bool pmc_overflow(unsigned long val)
/*
 * Performance monitor interrupt stuff
 */
static void perf_event_interrupt(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val[8];

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		val[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(val[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't generate IRQs */
		/*
		 * We've found one that's overflowed.  For active
		 * counters we need to log this.  For inactive
		 * counters, we need to reset it anyway
		 */
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				record_and_restart(event, val[i], regs);
			}
		}
		/* reset non active counters that have overflowed */
		write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
				/* event has overflowed in a buggy way */
				record_and_restart(event,
						   val[event->hw.idx - 1],
						   regs);
			}
		}
	}
	if ((!found) && printk_ratelimit())
		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);
}
static void power_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}
static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;
	}
}
int __cpuinit register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

#ifdef CONFIG_PPC64
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}