/*
 * Performance event support - Freescale Embedded Performance Monitor
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_events {
        int n_events;
        int disabled;
        u8  pmcs_enabled;
        struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct fsl_emb_pmu *ppmu;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
        return !regs->softe;
#else
        return 0;
#endif
}

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
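/*
 * mfpmr()/mtpmr() take the PMR number as an immediate operand in the
 * instruction, so the counter index cannot be computed at run time;
 * each PMC therefore needs its own switch case in the accessors below.
 */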
static unsigned long read_pmc(int idx)
{
        unsigned long val;

        switch (idx) {
        case 0:
                val = mfpmr(PMRN_PMC0);
                break;
        case 1:
                val = mfpmr(PMRN_PMC1);
                break;
        case 2:
                val = mfpmr(PMRN_PMC2);
                break;
        case 3:
                val = mfpmr(PMRN_PMC3);
                break;
        default:
                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
                val = 0;
        }
        return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMC0, val);
                break;
        case 1:
                mtpmr(PMRN_PMC1, val);
                break;
        case 2:
                mtpmr(PMRN_PMC2, val);
                break;
        case 3:
                mtpmr(PMRN_PMC3, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
        }

        isync();
}

/*
 * Write one local control A register
 */
static void write_pmlca(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMLCA0, val);
                break;
        case 1:
                mtpmr(PMRN_PMLCA1, val);
                break;
        case 2:
                mtpmr(PMRN_PMLCA2, val);
                break;
        case 3:
                mtpmr(PMRN_PMLCA3, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
        }

        isync();
}

/*
 * Write one local control B register
 */
static void write_pmlcb(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMLCB0, val);
                break;
        case 1:
                mtpmr(PMRN_PMLCB1, val);
                break;
        case 2:
                mtpmr(PMRN_PMLCB2, val);
                break;
        case 3:
                mtpmr(PMRN_PMLCB3, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
        }

        isync();
}

static void fsl_emb_pmu_read(struct perf_event *event)
{
        s64 val, delta, prev;

        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
         * Therefore we treat them like NMIs.
         */
        do {
                prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

        /* The counters are only 32 bits wide */
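        /*
         * For example, if prev = 0xfffffff0 and the counter has wrapped
         * around to val = 0x00000010, then (val - prev) & 0xffffffff
         * = 0x20: the mask recovers the 32 events that actually occurred.
         */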
        delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);
        local64_sub(delta, &event->hw.period_left);
}

/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
void hw_perf_disable(void)
{
        struct cpu_hw_events *cpuhw;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_events);

        if (!cpuhw->disabled) {
                cpuhw->disabled = 1;

                /*
                 * Check if we ever enabled the PMU on this cpu.
                 */
                if (!cpuhw->pmcs_enabled) {
                        ppc_enable_pmcs();
                        cpuhw->pmcs_enabled = 1;
                }

                if (atomic_read(&num_events)) {
                        /*
                         * Set the 'freeze all counters' bit, and disable
                         * interrupts.  The barrier is to make sure the
                         * mtpmr has been executed and the PMU has frozen
                         * the events before we return.
                         */

                        mtpmr(PMRN_PMGC0, PMGC0_FAC);
                        isync();
                }
        }
        local_irq_restore(flags);
}

/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
        struct cpu_hw_events *cpuhw;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_events);
        if (!cpuhw->disabled)
                goto out;

        cpuhw->disabled = 0;
        ppc_set_pmu_inuse(cpuhw->n_events != 0);

        if (cpuhw->n_events > 0) {
                mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
                isync();
        }

 out:
        local_irq_restore(flags);
}

static int collect_events(struct perf_event *group, int max_count,
                          struct perf_event *ctrs[])
{
        int n = 0;
        struct perf_event *event;

        if (!is_software_event(group)) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
                n++;
        }
        list_for_each_entry(event, &group->sibling_list, group_entry) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        ctrs[n] = event;
                        n++;
                }
        }
        return n;
}

/* perf must be disabled, context locked on entry */
static int fsl_emb_pmu_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuhw;
        int ret = -EAGAIN;
        int num_counters = ppmu->n_counter;
        u64 val;
        int i;

        cpuhw = &get_cpu_var(cpu_hw_events);

        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
                num_counters = ppmu->n_restricted;

        /*
         * Allocate counters from top-down, so that restricted-capable
         * counters are kept free as long as possible.
         */
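        /*
         * E.g. with n_counter = 4 and n_restricted = 2, unrestricted
         * events fill counters 3, 2, 1, 0 in that order, while a
         * restricted event only searches counters 1..0 (num_counters
         * was lowered above), so the low, restricted-capable counters
         * stay free for as long as possible.
         */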
        for (i = num_counters - 1; i >= 0; i--) {
                if (cpuhw->event[i])
                        continue;

                break;
        }

        if (i < 0)
                goto out;

        event->hw.idx = i;
        cpuhw->event[i] = event;
        ++cpuhw->n_events;

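        /*
         * An FSL embedded counter raises its overflow condition when its
         * most-significant bit becomes set, i.e. when the 32-bit count
         * reaches 0x80000000.  Seeding the counter with 2^31 - left thus
         * yields an overflow interrupt after "left" more events (e.g.
         * left = 1000 means starting the counter at 0x7ffffc18).
         */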
        val = 0;
        if (event->hw.sample_period) {
                s64 left = local64_read(&event->hw.period_left);
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }
        local64_set(&event->hw.prev_count, val);
        write_pmc(i, val);
        perf_event_update_userpage(event);

        write_pmlcb(i, event->hw.config >> 32);
        write_pmlca(i, event->hw.config_base);

        ret = 0;
 out:
        put_cpu_var(cpu_hw_events);
        return ret;
}

/* perf must be disabled, context locked on entry */
static void fsl_emb_pmu_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuhw;
        int i = event->hw.idx;

        if (i < 0)
                goto out;

        fsl_emb_pmu_read(event);

        cpuhw = &get_cpu_var(cpu_hw_events);

        WARN_ON(event != cpuhw->event[event->hw.idx]);

        write_pmlca(i, 0);
        write_pmlcb(i, 0);
        write_pmc(i, 0);

        cpuhw->event[i] = NULL;
        event->hw.idx = -1;

        /*
         * TODO: if at least one restricted event exists, and we
         * just freed up a non-restricted-capable counter, and
         * there is a restricted-capable counter occupied by
         * a non-restricted event, migrate that event to the
         * vacated counter.
         */

        cpuhw->n_events--;

 out:
        put_cpu_var(cpu_hw_events);
}

/*
 * Re-enable interrupts on an event after they were throttled
 * because they were coming too fast.
 *
 * Context is locked on entry, but perf is not disabled.
 */
static void fsl_emb_pmu_unthrottle(struct perf_event *event)
{
        s64 val, left;
        unsigned long flags;

        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;
        local_irq_save(flags);
        perf_disable();
        fsl_emb_pmu_read(event);
        left = event->hw.sample_period;
        event->hw.last_period = left;
        val = 0;
        if (left < 0x80000000L)
                val = 0x80000000L - left;
        write_pmc(event->hw.idx, val);
        local64_set(&event->hw.prev_count, val);
        local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
        perf_enable();
        local_irq_restore(flags);
}

static struct pmu fsl_emb_pmu = {
        .enable         = fsl_emb_pmu_enable,
        .disable        = fsl_emb_pmu_disable,
        .read           = fsl_emb_pmu_read,
        .unthrottle     = fsl_emb_pmu_unthrottle,
};

/*
 * Release the PMU if this is the last perf_event.
 */
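/*
 * atomic_add_unless(&num_events, -1, 1) decrements num_events only when
 * it is not 1, so the slow path below is taken only for the potentially
 * last event; rechecking under pmc_reserve_mutex closes the race with a
 * concurrent hw_perf_event_init() reserving the hardware.
 */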
static void hw_perf_event_destroy(struct perf_event *event)
{
        if (!atomic_add_unless(&num_events, -1, 1)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_dec_return(&num_events) == 0)
                        release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
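/*
 * attr.config packs one byte each of cache type, operation and result,
 * e.g. an L1-dcache read miss is PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) = 0x10000.
 */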
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
        unsigned long type, op, result;
        int ev;

        if (!ppmu->cache_events)
                return -EINVAL;

        /* unpack config */
        type = config & 0xff;
        op = (config >> 8) & 0xff;
        result = (config >> 16) & 0xff;

        if (type >= PERF_COUNT_HW_CACHE_MAX ||
            op >= PERF_COUNT_HW_CACHE_OP_MAX ||
            result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ev = (*ppmu->cache_events)[type][op][result];
        if (ev == 0)
                return -EOPNOTSUPP;
        if (ev == -1)
                return -EINVAL;
        *eventp = ev;
        return 0;
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
        u64 ev;
        struct perf_event *events[MAX_HWEVENTS];
        int n;
        int err;
        int num_restricted;
        int i;

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
                        return ERR_PTR(-EOPNOTSUPP);
                ev = ppmu->generic_events[ev];
                break;

        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
                        return ERR_PTR(err);
                break;

        case PERF_TYPE_RAW:
                ev = event->attr.config;
                break;

        default:
                return ERR_PTR(-EINVAL);
        }

        event->hw.config = ppmu->xlate_event(ev);
        if (!(event->hw.config & FSL_EMB_EVENT_VALID))
                return ERR_PTR(-EINVAL);

        /*
         * If this is in a group, check if it can go on with all the
         * other hardware events in the group.  We assume the event
         * hasn't been linked into its leader's sibling list at this point.
         */
        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
                                   ppmu->n_counter - 1, events);
                if (n < 0)
                        return ERR_PTR(-EINVAL);
        }

        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
                num_restricted = 0;
                for (i = 0; i < n; i++) {
                        if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
                                num_restricted++;
                }

                if (num_restricted >= ppmu->n_restricted)
                        return ERR_PTR(-EINVAL);
        }

        event->hw.idx = -1;

        event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
                                (u32)((ev << 16) & PMLCA_EVENT_MASK);

        if (event->attr.exclude_user)
                event->hw.config_base |= PMLCA_FCU;
        if (event->attr.exclude_kernel)
                event->hw.config_base |= PMLCA_FCS;
        if (event->attr.exclude_idle)
                return ERR_PTR(-ENOTSUPP);

        event->hw.last_period = event->hw.sample_period;
        local64_set(&event->hw.period_left, event->hw.last_period);

        /*
         * See if we need to reserve the PMU.
         * If no events are currently in use, then we have to take a
         * mutex to ensure that we don't race with another task doing
         * reserve_pmc_hardware or release_pmc_hardware.
         */
        err = 0;
        if (!atomic_inc_not_zero(&num_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&num_events) == 0 &&
                    reserve_pmc_hardware(perf_event_interrupt))
                        err = -EBUSY;
                else
                        atomic_inc(&num_events);
                mutex_unlock(&pmc_reserve_mutex);

                mtpmr(PMRN_PMGC0, PMGC0_FAC);
                isync();
        }
        event->destroy = hw_perf_event_destroy;

        if (err)
                return ERR_PTR(err);
        return &fsl_emb_pmu;
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
                               struct pt_regs *regs, int nmi)
{
        u64 period = event->hw.sample_period;
        s64 prev, delta, left;
        int record = 0;

        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);

        /*
         * See if the total period for this event has expired,
         * and update for the next period.
         */
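        /*
         * If the interrupt was delayed past more than one full period,
         * left stays <= 0 even after one period is added back; in that
         * case restart from a whole period rather than trying to replay
         * the missed overflows.
         */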
        val = 0;
        left = local64_read(&event->hw.period_left) - delta;
        if (period) {
                if (left <= 0) {
                        left += period;
                        if (left <= 0)
                                left = period;
                        record = 1;
                }
                if (left < 0x80000000LL)
                        val = 0x80000000LL - left;
        }

        /*
         * Finally record data if requested.
         */
        if (record) {
                struct perf_sample_data data;

                perf_sample_data_init(&data, 0);
                data.period = event->hw.last_period;

                if (perf_event_overflow(event, nmi, &data, regs)) {
                        /*
                         * Interrupts are coming too fast - throttle them
                         * by setting the event to 0, so it will be
                         * at least 2^30 cycles until the next interrupt
                         * (assuming each event counts at most 2 counts
                         * per cycle).
                         */
                        val = 0;
                        left = ~0ULL >> 1;
                }
        }

        write_pmc(event->hw.idx, val);
        local64_set(&event->hw.prev_count, val);
        local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
}

static void perf_event_interrupt(struct pt_regs *regs)
{
        int i;
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        unsigned long val;
        int found = 0;
        int nmi;

        nmi = perf_intr_is_nmi(regs);
        if (nmi)
                nmi_enter();
        else
                irq_enter();

        for (i = 0; i < ppmu->n_counter; ++i) {
                event = cpuhw->event[i];

                val = read_pmc(i);
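                /* MSB set, i.e. (int)val < 0, is the overflow condition. */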
                if ((int)val < 0) {
                        if (event) {
                                /* event has overflowed */
                                found = 1;
                                record_and_restart(event, val, regs, nmi);
                        } else {
                                /*
                                 * Disabled counter is negative,
                                 * reset it just in case.
                                 */
                                write_pmc(i, 0);
                        }
                }
        }

        /* PMM will keep counters frozen until we return from the interrupt. */
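        /*
         * FCECE (freeze counters on enabled condition or event) makes the
         * PMU freeze itself again on the next overflow, so counts stay
         * consistent until this handler runs again.
         */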
        mtmsr(mfmsr() | MSR_PMM);
        mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
        isync();

        if (nmi)
                nmi_exit();
        else
                irq_exit();
}

void hw_perf_event_setup(int cpu)
{
        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

        memset(cpuhw, 0, sizeof(*cpuhw));
}

int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
        if (ppmu)
                return -EBUSY;          /* something's already registered */

        ppmu = pmu;
        pr_info("%s performance monitor hardware support registered\n",
                pmu->name);

        return 0;
}
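/*
 * Usage sketch (illustrative only; the e500_* names and values are
 * assumptions, but every field shown is one this file actually uses):
 * a CPU-specific backend fills in a struct fsl_emb_pmu and registers it
 * from its init code, e.g.:
 *
 *      static struct fsl_emb_pmu e500_pmu = {
 *              .name           = "e500 family",
 *              .n_counter      = 4,
 *              .n_restricted   = 2,
 *              .xlate_event    = e500_xlate_event,
 *              .n_generic      = ARRAY_SIZE(e500_generic_events),
 *              .generic_events = e500_generic_events,
 *              .cache_events   = &e500_cache_events,
 *      };
 *
 *      register_fsl_emb_pmu(&e500_pmu);
 */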