1 #include <linux/interrupt.h>
2 #include <linux/kernel.h>
3 #include <linux/spinlock.h>
5 #include "mach/pmu_v7.h"
/* Forward declarations for the SMP wrapper entry points defined below;
 * they are referenced by the armv7pmu descriptor before being defined. */
static void smp_pmu_stop(void);
static void smp_pmu_start(void);
static void smp_pmu_reset(void);
static void smp_pmu_enable_event(void);
static void smp_pmu_read_counter(void);
static u32 __init armv7_read_num_pmnc_events(void);

/* Bitmask of counters to program in armv7pmu_enable(): low bits select the
 * PMNx event counters (tested as (event_mask >> idx) & EVENT_MASK below);
 * NOTE(review): bit 31 presumably selects the cycle counter — confirm
 * against the full source. */
static int event_mask = 0x8000003f;
/*
 * armv7pmu: global PMU descriptor wiring the generic arm_pmu operations to
 * the SMP wrapper functions defined below.
 * NOTE(review): several initializer lines are missing from this view of the
 * file (the nested per-counter/per-cpu configuration); the surviving
 * fragments are kept as-is — reconcile against the full source before
 * restructuring this initializer.
 */
struct arm_pmu armv7pmu = {
	.enable = smp_pmu_enable_event,
	.read_counter = smp_pmu_read_counter,
	.start = smp_pmu_start,
	.reset = smp_pmu_reset,
	/* NOTE(review): casting a function pointer to u32 truncates on 64-bit
	 * targets; presumably .num_events is resolved/called at init time —
	 * confirm the field's type in mach/pmu_v7.h. */
	.num_events = (u32)armv7_read_num_pmnc_events,
	.name = "ARMv7 Cortex-A7",
	/*PORTING-NOTE, per cpu has 4 event counter*/
	/* NOTE(review): fragment of a nested initializer whose surrounding
	 * lines are missing from this view. */
	ARMV7_L1_ICACHE_ACCESS,
	/*PORTING-NOTE, per cpu has one cnt_val*/
	/*PORTING-NOTE, per cpu has one overflow variable*/
57 * armv7_pmnc_read: return the Performance Monitors Control Register
61 static inline unsigned long armv7_pmnc_read(void)
64 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val
));
69 * armv7_pmnc_read: write value to the Performance Monitors Control Register
72 static inline void armv7_pmnc_write(unsigned long val
)
74 val
&= ARMV7_PMNC_MASK
;
76 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val
));
79 * armv7_read_num_pmnc_events: return the Performance Monitors Control Register counter
82 static u32 __init
armv7_read_num_pmnc_events(void)
86 /* Read the nb of CNTx counters supported from PMNC */
87 nb_cnt
= (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT
) & ARMV7_PMNC_N_MASK
;
94 * armv7_pmnc_has_overflowed: return whether the performance counter is overflowed
95 * @pmnc: performance counter value
97 static inline int armv7_pmnc_has_overflowed(unsigned long pmnc
)
99 return pmnc
& ARMV7_OVERFLOWED_MASK
;
103 * armv7_pmnc_counter_has_overflowed: return whether the performance counter is overflowed
104 * @pmnc: performance counter value
105 * @counter: performance counter number
107 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc
,
112 if (counter
== ARMV7_CYCLE_COUNTER
)
113 ret
= pmnc
& ARMV7_FLAG_C
;
114 else if ((counter
>= ARMV7_COUNTER0
) && (counter
<= ARMV7_COUNTER_LAST
))
115 ret
= pmnc
& ARMV7_FLAG_P(counter
);
117 pr_err("CPU%u checking wrong counter %d overflow status\n",
118 raw_smp_processor_id(), counter
);
124 * armv7_pmnc_counter_select_counter: select monitor counter and return the selected monitor counter register index
125 * to make sure the selected idx is not larger the maximum idx
126 * @idx: the performance counter index
128 static inline int armv7_pmnc_select_counter(unsigned int idx
)
132 if ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
)) {
133 pr_err("CPU%u selecting wrong PMNC counter"
134 " %d\n", raw_smp_processor_id(), idx
);
138 val
= (idx
- ARMV7_EVENT_CNT_TO_CNTx
) & ARMV7_SELECT_MASK
;
139 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val
));
146 * armv7pmu_read_counter: return Performance Monitors Cycle Count Register
147 * @idx: the performance counter index
149 static inline u32
armv7pmu_read_counter(int idx
)
151 unsigned long value
= 0;
153 if (idx
== ARMV7_CYCLE_COUNTER
)
154 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value
));
155 else if ((idx
>= ARMV7_COUNTER0
) && (idx
<= ARMV7_COUNTER_LAST
)) {
156 if (armv7_pmnc_select_counter(idx
) == idx
)
157 asm volatile("mrc p15, 0, %0, c9, c13, 2"
160 pr_err("CPU%u reading wrong counter %d\n",
161 raw_smp_processor_id(), idx
);
167 * armv7pmu_write_counter: write value to specific Performance Monitors Cycle Count Register
168 * @idx: the performance counter index
170 static inline void armv7pmu_write_counter(int idx
, u32 value
)
172 if (idx
== ARMV7_CYCLE_COUNTER
)
173 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value
));
174 else if ((idx
>= ARMV7_COUNTER0
) && (idx
<= ARMV7_COUNTER_LAST
)) {
175 if (armv7_pmnc_select_counter(idx
) == idx
)
176 asm volatile("mcr p15, 0, %0, c9, c13, 2"
179 pr_err("CPU%u writing wrong counter %d\n",
180 raw_smp_processor_id(), idx
);
183 static inline void armv7_pmnc_write_evtsel(unsigned int idx
, u32 val
)
185 if (armv7_pmnc_select_counter(idx
) == idx
) {
186 val
&= ARMV7_EVTSEL_MASK
;
187 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val
));
192 * armv7_pmnc_enable_counter: enable the selected Performance Monitors Count register
193 * @idx: the performance counter index
195 static inline u32
armv7_pmnc_enable_counter(unsigned int idx
)
199 if ((idx
!= ARMV7_CYCLE_COUNTER
) &&
200 ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
))) {
201 pr_err("CPU%u enabling wrong PMNC counter"
202 " %d\n", raw_smp_processor_id(), idx
);
206 if (idx
== ARMV7_CYCLE_COUNTER
)
207 val
= ARMV7_CNTENS_C
;
209 val
= ARMV7_CNTENS_P(idx
);
211 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val
));
217 * armv7_pmnc_disable_counter: disable the selected Performance Monitors Count register
218 * @idx: the performance counter index
220 static inline u32
armv7_pmnc_disable_counter(unsigned int idx
)
225 if ((idx
!= ARMV7_CYCLE_COUNTER
) &&
226 ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
))) {
227 pr_err("CPU%u disabling wrong PMNC counter"
228 " %d\n", raw_smp_processor_id(), idx
);
232 if (idx
== ARMV7_CYCLE_COUNTER
)
233 val
= ARMV7_CNTENC_C
;
235 val
= ARMV7_CNTENC_P(idx
);
237 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val
));
243 static inline u32
armv7_pmnc_enable_interrupt(unsigned int idx
)
247 if ((idx
!= ARMV7_CYCLE_COUNTER
) &&
248 ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
))) {
249 pr_err("CPU%u enabling wrong PMNC counter"
250 " interrupt enable %d\n", raw_smp_processor_id(), idx
);
254 if (idx
== ARMV7_CYCLE_COUNTER
)
255 val
= ARMV7_INTENS_C
;
257 val
= ARMV7_INTENS_P(idx
);
259 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val
));
264 static inline u32
armv7_pmnc_disable_interrupt(unsigned int idx
)
268 if ((idx
!= ARMV7_CYCLE_COUNTER
) &&
269 ((idx
< ARMV7_COUNTER0
) || (idx
> ARMV7_COUNTER_LAST
))) {
270 pr_err("CPU%u disabling wrong PMNC counter"
271 " interrupt enable %d\n", raw_smp_processor_id(), idx
);
275 if (idx
== ARMV7_CYCLE_COUNTER
)
276 val
= ARMV7_INTENC_C
;
278 val
= ARMV7_INTENC_P(idx
);
280 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val
));
287 * armv7_pmnc_get_overflow_status: get the overflow status.
290 static u32
armv7_pmnc_get_overflow_status(void)
294 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val
));
296 /* Write to clear flags */
297 val
&= ARMV7_FLAG_MASK
;
298 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val
));
304 static void armv7_pmnc_dump_regs(void)
309 printk(KERN_INFO
"PMNC registers dump:\n");
311 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val
));
312 printk(KERN_INFO
"PMNC =0x%08x\n", val
);
314 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val
));
315 printk(KERN_INFO
"CNTENS=0x%08x\n", val
);
317 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val
));
318 printk(KERN_INFO
"INTENS=0x%08x\n", val
);
320 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val
));
321 printk(KERN_INFO
"FLAGS =0x%08x\n", val
);
323 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val
));
324 printk(KERN_INFO
"SELECT=0x%08x\n", val
);
326 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val
));
327 printk(KERN_INFO
"CCNT =0x%08x\n", val
);
329 for (cnt
= ARMV7_COUNTER0
; cnt
< ARMV7_COUNTER_LAST
; cnt
++) {
330 armv7_pmnc_select_counter(cnt
);
331 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val
));
332 printk(KERN_INFO
"CNT[%d] count =0x%08x\n",
333 cnt
-ARMV7_EVENT_CNT_TO_CNTx
, val
);
334 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val
));
335 printk(KERN_INFO
"CNT[%d] evtsel=0x%08x\n",
336 cnt
-ARMV7_EVENT_CNT_TO_CNTx
, val
);
341 * armv7pmu_enable_event: enable the selected performance counter register
342 * @event: selected event
343 * @idx: selected performance counter register
346 static void armv7pmu_enable_event(u32 event
, int idx
)
350 * Enable counter and interrupt, and set the counter to count
351 * the event that we're interested in.
358 armv7_pmnc_disable_counter(idx
);
361 * Set event (if destined for PMNx counters)
362 * We don't need to set the event if it's a cycle count
364 if (idx
!= ARMV7_CYCLE_COUNTER
)
365 armv7_pmnc_write_evtsel(idx
, event
);
368 * Enable interrupt for this counter
370 //armv7_pmnc_enable_interrupt(idx);
375 armv7_pmnc_enable_counter(idx
);
381 * armv7pmu_disable_event: disable the selected performance counter register
382 * @event: selected event
383 * @idx: selected performance counter register
385 static void armv7pmu_disable_event(u32 event
, int idx
)
389 * Disable counter and interrupt
396 armv7_pmnc_disable_counter(idx
);
399 * Disable interrupt for this counter
401 //armv7_pmnc_disable_interrupt(idx);
407 * armv7pmu_start:All counters, including PMCCNTR, are enabled.
410 static void armv7pmu_start(void *info
)
412 /* Enable all counters */
413 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E
);
417 * armv7pmu_stop:All counters, including PMCCNTR, are disabled.
421 static void armv7pmu_stop(void *info
)
423 /* Disable all counters */
424 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E
);
428 * armv7pmu_reset:All counters, including PMCCNTR, are reseted.
431 static void armv7pmu_reset(void *info
)
434 /* The counter and interrupt enable registers are unknown at reset. */
435 //for (idx = 0; idx < NUMBER_OF_EVENT; ++idx)
436 // armv7pmu_disable_event(NULL, idx);
438 //armv7_pmnc_disable_counter(ARMV7_CYCLE_COUNTER);
440 /* Initialize & Reset PMNC: C and P bits */
441 armv7_pmnc_write(ARMV7_PMNC_P
| ARMV7_PMNC_C
);
445 * armv7pmu_enable: eanble all event, All counters, including PMCCNTR, are enabled
448 static void armv7pmu_enable(void *info
)
451 struct pmu_cfg
*p_cfg
= (struct pmu_cfg
*) &armv7pmu
.perf_cfg
;
453 armv7pmu_reset(NULL
);
456 armv7_pmnc_enable_counter(ARMV7_CYCLE_COUNTER
);
458 armv7_pmnc_disable_counter(ARMV7_CYCLE_COUNTER
);
460 for (idx
= 0; idx
< NUMBER_OF_EVENT
; idx
++) {
461 if( (event_mask
>> idx
) & EVENT_MASK
)
462 armv7pmu_enable_event(p_cfg
->event_cfg
[idx
], idx
);
464 armv7pmu_disable_event(0, idx
);
469 * armv7pmu_read_all_counter: read all counter, including PMCCNTR
472 static void armv7pmu_read_all_counter(void *info
)
474 int idx
, cpu
= raw_smp_processor_id();
475 struct pmu_data
*p_data
= (struct pmu_data
*) &armv7pmu
.perf_data
;
477 for (idx
= 0; idx
< NUMBER_OF_EVENT
+ 1; idx
++){
478 p_data
->cnt_val
[cpu
][idx
] = armv7pmu_read_counter(idx
);
481 p_data
->overflow
[cpu
] = armv7_pmnc_get_overflow_status();
484 static void smp_pmu_stop(void)
488 if(armv7pmu
.multicore
)
490 for(i
= 0; i
< NUMBER_OF_CPU
; i
++)
491 mtk_smp_call_function_single(i
, armv7pmu_stop
, NULL
, 1);
497 static void smp_pmu_start(void)
501 if(armv7pmu
.multicore
)
503 for(i
= 0; i
< NUMBER_OF_CPU
; i
++)
504 mtk_smp_call_function_single(i
, armv7pmu_start
, NULL
, 1);
507 armv7pmu_start(NULL
);
510 static void smp_pmu_reset(void)
514 if(armv7pmu
.multicore
)
516 for(i
= 0; i
< NUMBER_OF_CPU
; i
++)
517 mtk_smp_call_function_single(i
, armv7pmu_reset
, NULL
, 1);
520 armv7pmu_reset(NULL
);
523 static void smp_pmu_enable_event(void)
527 if(armv7pmu
.multicore
)
529 for(i
= 0; i
< NUMBER_OF_CPU
; i
++)
530 mtk_smp_call_function_single(i
, armv7pmu_enable
, NULL
, 1);
533 armv7pmu_enable(NULL
);
536 /*static void smp_pmu_disable_event(void)
541 static void smp_pmu_read_counter(void)
545 if(armv7pmu
.multicore
)
547 for(i
= 0; i
< NUMBER_OF_CPU
; i
++)
548 mtk_smp_call_function_single(i
, armv7pmu_read_all_counter
, NULL
, 1);
551 armv7pmu_read_all_counter(NULL
);
554 int register_pmu(struct arm_pmu
**p_pmu
)
562 void unregister_pmu(struct arm_pmu
**p_pmu
)