/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 *	- xscale1pmu: 2 event counters and a cycle counter
 *	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */

#ifdef CONFIG_CPU_XSCALE
enum xscale_perf_types {
        XSCALE_PERFCTR_ICACHE_MISS = 0x00,
        XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
        XSCALE_PERFCTR_DATA_STALL = 0x02,
        XSCALE_PERFCTR_ITLB_MISS = 0x03,
        XSCALE_PERFCTR_DTLB_MISS = 0x04,
        XSCALE_PERFCTR_BRANCH = 0x05,
        XSCALE_PERFCTR_BRANCH_MISS = 0x06,
        XSCALE_PERFCTR_INSTRUCTION = 0x07,
        XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
        XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
        XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
        XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
        XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
        XSCALE_PERFCTR_PC_CHANGED = 0x0D,
        XSCALE_PERFCTR_BCU_REQUEST = 0x10,
        XSCALE_PERFCTR_BCU_FULL = 0x11,
        XSCALE_PERFCTR_BCU_DRAIN = 0x12,
        XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
        XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
        XSCALE_PERFCTR_RMW = 0x16,
        /* XSCALE_PERFCTR_CCNT is not hardware defined */
        XSCALE_PERFCTR_CCNT = 0xFE,
        XSCALE_PERFCTR_UNUSED = 0xFF,
};

enum xscale_counters {
        XSCALE_CYCLE_COUNTER = 0,
        XSCALE_COUNTER0,
        XSCALE_COUNTER1,
        XSCALE_COUNTER2,
        XSCALE_COUNTER3,
};

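/*
 * Map the generic perf hardware events onto XScale event numbers; events
 * this PMU cannot count are marked HW_OP_UNSUPPORTED.
 */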
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
        [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
        [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
        [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
        [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};

static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                            [PERF_COUNT_HW_CACHE_OP_MAX]
                                            [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
                        [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
};

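/*
 * On xscale1 a single CP14 control register (PMNC) holds everything: the
 * enable, reset and clock-divider bits, the per-counter interrupt enables
 * and overflow flags, and the event selectors for both event counters.
 */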
#define XSCALE_PMU_ENABLE 0x001
#define XSCALE_PMN_RESET 0x002
#define XSCALE_CCNT_RESET 0x004
#define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64 0x008

#define XSCALE1_OVERFLOWED_MASK 0x700
#define XSCALE1_CCOUNT_OVERFLOW 0x400
#define XSCALE1_COUNT0_OVERFLOW 0x100
#define XSCALE1_COUNT1_OVERFLOW 0x200
#define XSCALE1_CCOUNT_INT_EN 0x040
#define XSCALE1_COUNT0_INT_EN 0x010
#define XSCALE1_COUNT1_INT_EN 0x020
#define XSCALE1_COUNT0_EVT_SHFT 12
#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT 20
#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)

static inline u32
xscale1pmu_read_pmnc(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
        return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
        /* the upper 4 bits and bits 7 and 11 are write-as-0 */
        val &= 0xffff77f;
        asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}

static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
                                    enum xscale_counters counter)
{
        int ret = 0;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
                break;
        case XSCALE_COUNTER0:
                ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
                break;
        case XSCALE_COUNTER1:
                ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
        }

        return ret;
}

static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
        unsigned long pmnc;
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct pt_regs *regs;
        int idx;

        /*
         * NOTE: there's an A stepping erratum that states if an overflow
         *       bit already exists and another occurs, the previous
         *       Overflow bit gets cleared. There's no workaround.
         *       Fixed in B stepping or later.
         */
        pmnc = xscale1pmu_read_pmnc();

        /*
         * Write the value back to clear the overflow flags. Overflow
         * flags remain in pmnc for use below. We also disable the PMU
         * while we process the interrupt.
         */
        xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

        if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
                return IRQ_NONE;

        regs = get_irq_regs();

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < armpmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event, hwc, idx, 1);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        armpmu->disable(hwc, idx);
        }

        irq_work_run();

        /*
         * Re-enable the PMU.
         */
        pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
        xscale1pmu_write_pmnc(pmnc);

        return IRQ_HANDLED;
}

static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
        unsigned long val, mask, evt, flags;
        struct cpu_hw_events *events = armpmu->get_hw_events();

        switch (idx) {
        case XSCALE_CYCLE_COUNTER:
                mask = 0;
                evt = XSCALE1_CCOUNT_INT_EN;
                break;
        case XSCALE_COUNTER0:
                mask = XSCALE1_COUNT0_EVT_MASK;
                evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
                        XSCALE1_COUNT0_INT_EN;
                break;
        case XSCALE_COUNTER1:
                mask = XSCALE1_COUNT1_EVT_MASK;
                evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
                        XSCALE1_COUNT1_INT_EN;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale1pmu_read_pmnc();
        val &= ~mask;
        val |= evt;
        xscale1pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
        unsigned long val, mask, evt, flags;
        struct cpu_hw_events *events = armpmu->get_hw_events();

        switch (idx) {
        case XSCALE_CYCLE_COUNTER:
                mask = XSCALE1_CCOUNT_INT_EN;
                evt = 0;
                break;
        case XSCALE_COUNTER0:
                mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
                evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
                break;
        case XSCALE_COUNTER1:
                mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
                evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale1pmu_read_pmnc();
        val &= ~mask;
        val |= evt;
        xscale1pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

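/*
 * The cycle counter can only count CCNT; anything else goes on one of the
 * two general-purpose counters, trying counter 1 before counter 0.
 */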
static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
                         struct hw_perf_event *event)
{
        if (XSCALE_PERFCTR_CCNT == event->config_base) {
                if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return XSCALE_CYCLE_COUNTER;
        } else {
                if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
                        return XSCALE_COUNTER1;

                if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
                        return XSCALE_COUNTER0;

                return -EAGAIN;
        }
}

static void
xscale1pmu_start(void)
{
        unsigned long flags, val;
        struct cpu_hw_events *events = armpmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale1pmu_read_pmnc();
        val |= XSCALE_PMU_ENABLE;
        xscale1pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
xscale1pmu_stop(void)
{
        unsigned long flags, val;
        struct cpu_hw_events *events = armpmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale1pmu_read_pmnc();
        val &= ~XSCALE_PMU_ENABLE;
        xscale1pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32
xscale1pmu_read_counter(int counter)
{
        u32 val = 0;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER0:
                asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER1:
                asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
                break;
        }

        return val;
}

static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER0:
                asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER1:
                asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
                break;
        }
}

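/*
 * num_events covers the cycle counter plus the two event counters; raw
 * event codes are the 8-bit XSCALE_PERFCTR_* values and each counter is
 * 32 bits wide.
 */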
static struct arm_pmu xscale1pmu = {
        .id = ARM_PERF_PMU_ID_XSCALE1,
        .name = "xscale1",
        .handle_irq = xscale1pmu_handle_irq,
        .enable = xscale1pmu_enable_event,
        .disable = xscale1pmu_disable_event,
        .read_counter = xscale1pmu_read_counter,
        .write_counter = xscale1pmu_write_counter,
        .get_event_idx = xscale1pmu_get_event_idx,
        .start = xscale1pmu_start,
        .stop = xscale1pmu_stop,
        .cache_map = &xscale_perf_cache_map,
        .event_map = &xscale_perf_map,
        .raw_event_mask = 0xFF,
        .num_events = 3,
        .max_period = (1LLU << 32) - 1,
};

static struct arm_pmu *__init xscale1pmu_init(void)
{
        return &xscale1pmu;
}

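/*
 * xscale2 splits the PMU state across separate CP14 registers: PMNC keeps
 * only the enable/reset/clock-divider bits, while the overflow flags,
 * interrupt enables and per-counter event selectors each get their own
 * register (accessed by the helpers below).
 */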
#define XSCALE2_OVERFLOWED_MASK 0x01f
#define XSCALE2_CCOUNT_OVERFLOW 0x001
#define XSCALE2_COUNT0_OVERFLOW 0x002
#define XSCALE2_COUNT1_OVERFLOW 0x004
#define XSCALE2_COUNT2_OVERFLOW 0x008
#define XSCALE2_COUNT3_OVERFLOW 0x010
#define XSCALE2_CCOUNT_INT_EN 0x001
#define XSCALE2_COUNT0_INT_EN 0x002
#define XSCALE2_COUNT1_INT_EN 0x004
#define XSCALE2_COUNT2_INT_EN 0x008
#define XSCALE2_COUNT3_INT_EN 0x010
#define XSCALE2_COUNT0_EVT_SHFT 0
#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT 8
#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT 16
#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT 24
#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)

static inline u32
xscale2pmu_read_pmnc(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
        /* bits 1-2 and 4-23 are read-unpredictable */
        return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
        /* bits 4-23 are write-as-0, 24-31 are write ignored */
        val &= 0xf;
        asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
        return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
        asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
        return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
        asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
        return val;
}

static void
xscale2pmu_write_int_enable(u32 val)
{
        asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}

static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
                                    enum xscale_counters counter)
{
        int ret = 0;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
                break;
        case XSCALE_COUNTER0:
                ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
                break;
        case XSCALE_COUNTER1:
                ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
                break;
        case XSCALE_COUNTER2:
                ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
                break;
        case XSCALE_COUNTER3:
                ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
        }

        return ret;
}

static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
        unsigned long pmnc, of_flags;
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct pt_regs *regs;
        int idx;

        /* Disable the PMU. */
        pmnc = xscale2pmu_read_pmnc();
        xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

        /* Check the overflow flag register. */
        of_flags = xscale2pmu_read_overflow_flags();
        if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
                return IRQ_NONE;

        /* Clear the overflow bits. */
        xscale2pmu_write_overflow_flags(of_flags);

        regs = get_irq_regs();

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < armpmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event, hwc, idx, 1);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        armpmu->disable(hwc, idx);
        }

        irq_work_run();

        /*
         * Re-enable the PMU.
         */
        pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
        xscale2pmu_write_pmnc(pmnc);

        return IRQ_HANDLED;
}

static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
        unsigned long flags, ien, evtsel;
        struct cpu_hw_events *events = armpmu->get_hw_events();

        ien = xscale2pmu_read_int_enable();
        evtsel = xscale2pmu_read_event_select();

        switch (idx) {
        case XSCALE_CYCLE_COUNTER:
                ien |= XSCALE2_CCOUNT_INT_EN;
                break;
        case XSCALE_COUNTER0:
                ien |= XSCALE2_COUNT0_INT_EN;
                evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
                evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
                break;
        case XSCALE_COUNTER1:
                ien |= XSCALE2_COUNT1_INT_EN;
                evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
                evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
                break;
        case XSCALE_COUNTER2:
                ien |= XSCALE2_COUNT2_INT_EN;
                evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
                evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
                break;
        case XSCALE_COUNTER3:
                ien |= XSCALE2_COUNT3_INT_EN;
                evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
                evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        xscale2pmu_write_event_select(evtsel);
        xscale2pmu_write_int_enable(ien);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
        unsigned long flags, ien, evtsel;
        struct cpu_hw_events *events = armpmu->get_hw_events();

        ien = xscale2pmu_read_int_enable();
        evtsel = xscale2pmu_read_event_select();

        switch (idx) {
        case XSCALE_CYCLE_COUNTER:
                ien &= ~XSCALE2_CCOUNT_INT_EN;
                break;
        case XSCALE_COUNTER0:
                ien &= ~XSCALE2_COUNT0_INT_EN;
                evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
                break;
        case XSCALE_COUNTER1:
                ien &= ~XSCALE2_COUNT1_INT_EN;
                evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
                break;
        case XSCALE_COUNTER2:
                ien &= ~XSCALE2_COUNT2_INT_EN;
                evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
                break;
        case XSCALE_COUNTER3:
                ien &= ~XSCALE2_COUNT3_INT_EN;
                evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        xscale2pmu_write_event_select(evtsel);
        xscale2pmu_write_int_enable(ien);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

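/*
 * Reuse the xscale1 allocator for the cycle counter and counters 0/1,
 * then fall back to the two extra counters (3, then 2) that only
 * xscale2 provides.
 */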
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
                         struct hw_perf_event *event)
{
        int idx = xscale1pmu_get_event_idx(cpuc, event);
        if (idx >= 0)
                goto out;

        if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
                idx = XSCALE_COUNTER3;
        else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
                idx = XSCALE_COUNTER2;
out:
        return idx;
}

static void
xscale2pmu_start(void)
{
        unsigned long flags, val;
        struct cpu_hw_events *events = armpmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
        val |= XSCALE_PMU_ENABLE;
        xscale2pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
xscale2pmu_stop(void)
{
        unsigned long flags, val;
        struct cpu_hw_events *events = armpmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale2pmu_read_pmnc();
        val &= ~XSCALE_PMU_ENABLE;
        xscale2pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32
xscale2pmu_read_counter(int counter)
{
        u32 val = 0;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER0:
                asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER1:
                asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER2:
                asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER3:
                asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
                break;
        }

        return val;
}

static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER0:
                asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER1:
                asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER2:
                asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER3:
                asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
                break;
        }
}

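/*
 * Same shape as xscale1pmu, but with four event counters plus the cycle
 * counter (num_events = 5).
 */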
static struct arm_pmu xscale2pmu = {
        .id = ARM_PERF_PMU_ID_XSCALE2,
        .name = "xscale2",
        .handle_irq = xscale2pmu_handle_irq,
        .enable = xscale2pmu_enable_event,
        .disable = xscale2pmu_disable_event,
        .read_counter = xscale2pmu_read_counter,
        .write_counter = xscale2pmu_write_counter,
        .get_event_idx = xscale2pmu_get_event_idx,
        .start = xscale2pmu_start,
        .stop = xscale2pmu_stop,
        .cache_map = &xscale_perf_cache_map,
        .event_map = &xscale_perf_map,
        .raw_event_mask = 0xFF,
        .num_events = 5,
        .max_period = (1LLU << 32) - 1,
};

static struct arm_pmu *__init xscale2pmu_init(void)
{
        return &xscale2pmu;
}
#else
static struct arm_pmu *__init xscale1pmu_init(void)
{
        return NULL;
}

static struct arm_pmu *__init xscale2pmu_init(void)
{
        return NULL;
}
#endif /* CONFIG_CPU_XSCALE */