/* arch/arm/mach-mt8127/pmu_v7.c */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include "mach/pmu_v7.h"

static void smp_pmu_stop(void);
static void smp_pmu_start(void);
static void smp_pmu_reset(void);
static void smp_pmu_enable_event(void);
static void smp_pmu_read_counter(void);
static u32 armv7_read_num_pmnc_events(void);

/*
 * Bit 31 gates the cycle counter; bits [NUMBER_OF_EVENT-1:0] gate the
 * event counters (see armv7pmu_enable()).
 */
static int event_mask = 0x8000003f;

struct arm_pmu armv7pmu = {
	.enable		= smp_pmu_enable_event,
	.read_counter	= smp_pmu_read_counter,
	.start		= smp_pmu_start,
	.stop		= smp_pmu_stop,
	.reset		= smp_pmu_reset,
	/*
	 * A function cannot be called from a static initializer, and
	 * casting its address to u32 would store a pointer, not a count;
	 * num_events is filled in from armv7_read_num_pmnc_events() in
	 * register_pmu().
	 */
	.num_events	= 0,
	.id		= ARM_PMU_ID_CA7,
	.name		= "ARMv7 Cortex-A7",
	.perf_cfg = {
		/* PORTING-NOTE: each CPU has 4 event counters. */
		.event_cfg = {
			ARMV7_IFETCH_MISS,
			ARMV7_L1_ICACHE_ACCESS,
			ARMV7_DCACHE_REFILL,
			ARMV7_DCACHE_ACCESS,
		},
	},
	.perf_data = {
		/* PORTING-NOTE: each CPU has one row of counter values. */
		.cnt_val = {
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
			{0, 0, 0, 0, 0},
#if (NR_CPUS >= 4)
			{0, 0, 0, 0, 0}
#endif
		},
		/* PORTING-NOTE: each CPU has one overflow variable. */
		.overflow = {
			0,
			0,
			0,
#if (NR_CPUS >= 4)
			0
#endif
		},
	},
};
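
/*
 * Illustrative sketch (not built): deriving a D-cache miss ratio from a
 * row of cnt_val after smp_pmu_read_counter() has run.  The slot indices
 * below are hypothetical; the real mapping of events to slots depends on
 * ARMV7_CYCLE_COUNTER/ARMV7_COUNTER0 in mach/pmu_v7.h and on the loop in
 * armv7pmu_read_all_counter() later in this file.
 */
#if 0
#define SLOT_DCACHE_REFILL	3	/* hypothetical */
#define SLOT_DCACHE_ACCESS	4	/* hypothetical */

static u32 example_dcache_miss_permille(int cpu)
{
	struct pmu_data *p = (struct pmu_data *)&armv7pmu.perf_data;
	u32 refill = p->cnt_val[cpu][SLOT_DCACHE_REFILL];
	u32 access = p->cnt_val[cpu][SLOT_DCACHE_ACCESS];

	/* 32-bit math; may overflow for very large refill counts */
	return access ? (refill * 1000u) / access : 0;
}
#endif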

/*
 * armv7_pmnc_read: return the value of the Performance Monitors
 * Control Register (PMCR)
 */
static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

/*
 * armv7_pmnc_write: write a value to the Performance Monitors
 * Control Register (PMCR)
 * @val: value to write, masked with ARMV7_PMNC_MASK
 */
static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

/*
 * armv7_read_num_pmnc_events: return the number of event counters
 * advertised in the PMCR.N field.  Called from register_pmu(), so it
 * is not marked __init.
 */
static u32 armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the number of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	return nb_cnt;
}
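
/*
 * Illustrative sketch (not built): the architectural PMCR layout this
 * helper relies on.  On ARMv7, PMCR[15:11] is N (number of event
 * counters), PMCR[23:16] is IDCODE and PMCR[31:24] is IMP, so
 * ARMV7_PMNC_N_SHIFT/ARMV7_PMNC_N_MASK are expected to be 11 and 0x1f.
 */
#if 0
static void example_decode_pmcr(void)
{
	u32 pmcr = armv7_pmnc_read();

	pr_info("PMCR: IMP=0x%02x IDCODE=0x%02x N=%u\n",
		(pmcr >> 24) & 0xff, (pmcr >> 16) & 0xff,
		(pmcr >> 11) & 0x1f);
}
#endif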


/*
 * armv7_pmnc_has_overflowed: return whether any performance counter
 * has overflowed
 * @pmnc: overflow flag register value
 */
static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

/*
 * armv7_pmnc_counter_has_overflowed: return whether the given
 * performance counter has overflowed
 * @pmnc: overflow flag register value
 * @counter: performance counter number
 */
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
						    int counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			raw_smp_processor_id(), counter);

	return ret;
}
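
/*
 * Illustrative sketch (not built): scanning all event counters for
 * overflow with the two helpers above.  @flags is a PMOVSR snapshot,
 * e.g. the value returned by armv7_pmnc_get_overflow_status() later
 * in this file.
 */
#if 0
static void example_check_overflows(unsigned long flags)
{
	int cnt;

	if (!armv7_pmnc_has_overflowed(flags))
		return;

	for (cnt = ARMV7_COUNTER0; cnt <= ARMV7_COUNTER_LAST; cnt++)
		if (armv7_pmnc_counter_has_overflowed(flags, cnt))
			pr_info("counter %d overflowed\n", cnt);
}
#endif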

/*
 * armv7_pmnc_select_counter: select an event counter via PMSELR and
 * return its index, after checking that the index does not exceed the
 * maximum
 * @idx: the performance counter index
 */
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			raw_smp_processor_id(), idx);
		return -1;
	}

	/*
	 * Counter indices in this driver are offset from the hardware
	 * event-counter numbers; subtract the offset to form the PMSELR
	 * value.
	 */
	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
	isb();

	return idx;
}

/*
 * armv7pmu_read_counter: read the cycle counter (PMCCNTR) or the
 * selected event counter (PMXEVCNTR)
 * @idx: the performance counter index
 */
static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
			raw_smp_processor_id(), idx);

	return value;
}
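
/*
 * Illustrative sketch (not built): measuring a code section with the
 * cycle counter.  Assumes counters are already enabled via
 * armv7pmu_enable()/armv7pmu_start() and that the caller prevents
 * preemption and CPU migration across the window.
 */
#if 0
static u32 example_cycles_for(void (*fn)(void))
{
	u32 before, after;

	before = armv7pmu_read_counter(ARMV7_CYCLE_COUNTER);
	fn();
	after = armv7pmu_read_counter(ARMV7_CYCLE_COUNTER);

	return after - before;	/* unsigned math stays correct across one wrap */
}
#endif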

/*
 * armv7pmu_write_counter: write a value to the cycle counter (PMCCNTR)
 * or the selected event counter (PMXEVCNTR)
 * @idx: the performance counter index
 * @value: value to write
 */
static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
			raw_smp_processor_id(), idx);
}

/*
 * armv7_pmnc_write_evtsel: program the event type (PMXEVTYPER) for the
 * selected counter
 * @idx: the performance counter index
 * @val: event number, masked with ARMV7_EVTSEL_MASK
 */
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}

/*
 * armv7_pmnc_enable_counter: enable the selected counter via CNTENSET
 * (write-1-to-set; other counters are unaffected)
 * @idx: the performance counter index
 */
static inline int armv7_pmnc_enable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			raw_smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;
	else
		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

	return idx;
}

/*
 * armv7_pmnc_disable_counter: disable the selected counter via CNTENCLR
 * (write-1-to-clear; other counters are unaffected)
 * @idx: the performance counter index
 */
static inline int armv7_pmnc_disable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			raw_smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;
	else
		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

	return idx;
}

#if 0
static inline u32 armv7_pmnc_enable_interrupt(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" interrupt enable %d\n", raw_smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;
	else
		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_disable_interrupt(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" interrupt enable %d\n", raw_smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;
	else
		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));

	return idx;
}
#endif

/*
 * armv7_pmnc_get_overflow_status: read the overflow flag register
 * (PMOVSR) and clear the flags that were set (write-1-to-clear)
 */
static u32 armv7_pmnc_get_overflow_status(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write back to clear the flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	/* <= so the last event counter is dumped as well */
	for (cnt = ARMV7_COUNTER0; cnt <= ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
	}
}
#endif

/*
 * armv7pmu_enable_event: program and enable the selected performance
 * counter
 * @event: selected event number
 * @idx: selected performance counter register
 */
static void armv7pmu_enable_event(u32 event, int idx)
{
	/*
	 * Enable the counter and set it to count the event that we're
	 * interested in.  The overflow interrupt is intentionally left
	 * disabled in this driver.
	 */

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters).
	 * We don't need to set the event if it's a cycle count.
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, event);

	/* Enable interrupt for this counter */
	/* armv7_pmnc_enable_interrupt(idx); */

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);
}
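
/*
 * Illustrative sketch (not built): programming one counter with an
 * architectural ARMv7 event number instead of one of the ARMV7_*
 * macros from mach/pmu_v7.h (0x03 is L1 data cache refill in the
 * ARMv7 ARM).
 */
#if 0
static void example_count_l1d_refills(void)
{
	armv7pmu_enable_event(0x03, ARMV7_COUNTER0);
}
#endif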

/*
 * armv7pmu_disable_event: disable the selected performance counter
 * @event: selected event number (unused)
 * @idx: selected performance counter register
 */
static void armv7pmu_disable_event(u32 event, int idx)
{
	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/* Disable interrupt for this counter */
	/* armv7_pmnc_disable_interrupt(idx); */
}

/*
 * armv7pmu_start: enable all counters, including PMCCNTR
 * @info: unused (SMP cross-call signature)
 */
static void armv7pmu_start(void *info)
{
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
}

/*
 * armv7pmu_stop: disable all counters, including PMCCNTR
 * @info: unused (SMP cross-call signature)
 */
static void armv7pmu_stop(void *info)
{
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
}

/*
 * armv7pmu_reset: reset all counters, including PMCCNTR
 * @info: unused (SMP cross-call signature)
 */
static void armv7pmu_reset(void *info)
{
	/* The counter and interrupt enable registers are unknown at reset. */
	/*
	 * for (idx = 0; idx < NUMBER_OF_EVENT; ++idx)
	 *	armv7pmu_disable_event(NULL, idx);
	 * armv7_pmnc_disable_counter(ARMV7_CYCLE_COUNTER);
	 */

	/* Initialize & reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

/*
 * armv7pmu_enable: enable the configured events on this CPU according
 * to event_mask, including PMCCNTR when bit 31 is set
 * @info: unused (SMP cross-call signature)
 */
static void armv7pmu_enable(void *info)
{
	int idx;
	struct pmu_cfg *p_cfg = (struct pmu_cfg *)&armv7pmu.perf_cfg;

	armv7pmu_reset(NULL);

	if (event_mask >> 31)
		armv7_pmnc_enable_counter(ARMV7_CYCLE_COUNTER);
	else
		armv7_pmnc_disable_counter(ARMV7_CYCLE_COUNTER);

	for (idx = 0; idx < NUMBER_OF_EVENT; idx++) {
		if ((event_mask >> idx) & EVENT_MASK)
			armv7pmu_enable_event(p_cfg->event_cfg[idx], idx);
		else
			armv7pmu_disable_event(0, idx);
	}
}
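
/*
 * Illustrative sketch (not built): how event_mask drives
 * armv7pmu_enable().  0x80000003 would keep the cycle counter (bit 31)
 * and the first two entries of event_cfg, and disable the rest.
 */
#if 0
static void example_select_two_events(void)
{
	event_mask = 0x80000003;
	smp_pmu_enable_event();	/* re-program the CPU(s) */
}
#endif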

/*
 * armv7pmu_read_all_counter: read all counters on this CPU, including
 * PMCCNTR, into perf_data, then latch and clear the overflow flags
 * @info: unused (SMP cross-call signature)
 */
static void armv7pmu_read_all_counter(void *info)
{
	int idx, cpu = raw_smp_processor_id();
	struct pmu_data *p_data = (struct pmu_data *)&armv7pmu.perf_data;

	for (idx = 0; idx < NUMBER_OF_EVENT + 1; idx++)
		p_data->cnt_val[cpu][idx] = armv7pmu_read_counter(idx);

	p_data->overflow[cpu] = armv7_pmnc_get_overflow_status();
}
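
/*
 * Illustrative sketch (not built): dumping the snapshot taken by
 * armv7pmu_read_all_counter() for every CPU.  Assumes cnt_val holds
 * u32 values, matching armv7pmu_read_counter()'s return type.
 */
#if 0
static void example_dump_snapshot(void)
{
	int cpu, idx;
	struct pmu_data *p = (struct pmu_data *)&armv7pmu.perf_data;

	for (cpu = 0; cpu < NUMBER_OF_CPU; cpu++)
		for (idx = 0; idx < NUMBER_OF_EVENT + 1; idx++)
			pr_info("cpu%d cnt[%d]=%u overflow=0x%x\n",
				cpu, idx, p->cnt_val[cpu][idx],
				p->overflow[cpu]);
}
#endif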

static void smp_pmu_stop(void)
{
#ifdef CONFIG_SMP
	int i;

	if (armv7pmu.multicore) {
		for (i = 0; i < NUMBER_OF_CPU; i++)
			mtk_smp_call_function_single(i, armv7pmu_stop, NULL, 1);
	} else
#endif
		armv7pmu_stop(NULL);
}

static void smp_pmu_start(void)
{
#ifdef CONFIG_SMP
	int i;

	if (armv7pmu.multicore) {
		for (i = 0; i < NUMBER_OF_CPU; i++)
			mtk_smp_call_function_single(i, armv7pmu_start, NULL, 1);
	} else
#endif
		armv7pmu_start(NULL);
}

static void smp_pmu_reset(void)
{
#ifdef CONFIG_SMP
	int i;

	if (armv7pmu.multicore) {
		for (i = 0; i < NUMBER_OF_CPU; i++)
			mtk_smp_call_function_single(i, armv7pmu_reset, NULL, 1);
	} else
#endif
		armv7pmu_reset(NULL);
}

static void smp_pmu_enable_event(void)
{
#ifdef CONFIG_SMP
	int i;

	if (armv7pmu.multicore) {
		for (i = 0; i < NUMBER_OF_CPU; i++)
			mtk_smp_call_function_single(i, armv7pmu_enable, NULL, 1);
	} else
#endif
		armv7pmu_enable(NULL);
}

/*
static void smp_pmu_disable_event(void)
{
}
*/

static void smp_pmu_read_counter(void)
{
#ifdef CONFIG_SMP
	int i;

	if (armv7pmu.multicore) {
		for (i = 0; i < NUMBER_OF_CPU; i++)
			mtk_smp_call_function_single(i,
				armv7pmu_read_all_counter, NULL, 1);
	} else
#endif
		armv7pmu_read_all_counter(NULL);
}

int register_pmu(struct arm_pmu **p_pmu)
{
	/*
	 * See the note at armv7pmu: num_events cannot be filled in by the
	 * static initializer, so read it from the PMCR here.
	 */
	armv7pmu.num_events = armv7_read_num_pmnc_events();
	*p_pmu = &armv7pmu;
	return 0;
}
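
/*
 * Illustrative sketch (not built): a typical client sequence through
 * the registered ops.  The consumer of struct arm_pmu (e.g. a MTK
 * monitor driver) is an assumption, not part of this file.
 */
#if 0
static void example_profile_window(void)
{
	struct arm_pmu *pmu;

	register_pmu(&pmu);
	pmu->enable();		/* program event_cfg on the CPU(s) */
	pmu->start();		/* set PMNC.E */

	/* ... workload under measurement ... */

	pmu->read_counter();	/* snapshot into perf_data */
	pmu->stop();		/* clear PMNC.E */
	unregister_pmu(&pmu);
}
#endif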

void unregister_pmu(struct arm_pmu **p_pmu)
{
	*p_pmu = NULL;
}