/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */

#ifdef CONFIG_CPU_V7

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
	ARMV7_PERFCTR_MEM_READ				= 0x06,
	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
	ARMV7_PERFCTR_CID_WRITE				= 0x0B,

	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all (taken) branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
};
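
/*
 * Illustrative note (perf-tool usage, not from this file): these encodings
 * are what the raw-event syntax of the perf tool selects, e.g.
 * "perf stat -e r03" requests event 0x03 (ARMV7_PERFCTR_L1_DCACHE_REFILL).
 */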

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,

	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,

	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,

	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
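
/*
 * A minimal sketch of how the core layer consumes a cache map like the one
 * above (the real lookup lives in armpmu_map_event(); the error value here
 * is an assumption about the generic code, not something this file defines):
 *
 *	config = map[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)];
 *	if (config == CACHE_OP_UNSUPPORTED)
 *		return -ENOENT;
 */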

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		/*
		 * The prefetch counters don't differentiate between the I
		 * side and the D side.
		 */
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
733
43eab878 734/*
c691bb62 735 * Perf Events' indices
43eab878 736 */
c691bb62
WD
737#define ARMV7_IDX_CYCLE_COUNTER 0
738#define ARMV7_IDX_COUNTER0 1
7279adbd
SK
739#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
740 (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
c691bb62
WD
741
742#define ARMV7_MAX_COUNTERS 32
743#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
43eab878
WD
744
745/*
c691bb62 746 * ARMv7 low level PMNC access
43eab878 747 */
43eab878
WD
748
749/*
c691bb62 750 * Perf Event to low level counters mapping
43eab878 751 */
c691bb62
WD
752#define ARMV7_IDX_TO_COUNTER(x) \
753 (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
43eab878
WD
754
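/*
 * Example: idx 1 (ARMV7_IDX_COUNTER0) maps to hardware counter 0, idx 2 to
 * counter 1, and so on; the cycle counter (ARMV7_IDX_CYCLE_COUNTER) is
 * handled specially and never goes through ARMV7_IDX_TO_COUNTER().
 */
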
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

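/*
 * Worked example (illustrative register value): a PMNC read of 0x41093000
 * has bits [15:11] = 0b00110, i.e. six event counters in addition to the
 * cycle counter; armv7_read_num_pmnc_events() below performs this decode.
 */
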
/*
 * FLAG: counters overflow flag status reg
 */
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

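/*
 * ARMV7_EVTYPE_MASK (0xc80000ff) keeps the PL1/user exclude bits (31, 30),
 * the hypervisor include bit (27) and the 8-bit event number; the remaining
 * PMXEVTYPER bits are left alone as reserved.
 */
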
/*
 * Event filters for PMUv2
 */
#define	ARMV7_EXCLUDE_PL1	(1 << 31)
#define	ARMV7_EXCLUDE_USER	(1 << 30)
#define	ARMV7_INCLUDE_HYP	(1 << 27)

static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

static inline int armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();

	return idx;
}

static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTYPE_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}

static inline int armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
	return idx;
}

static inline int armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();

	return idx;
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif

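/*
 * Save/restore the raw PMU register state via cp15; these back the
 * save_regs/restore_regs callbacks installed by armv7pmu_init() below.
 */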
static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
					struct cpupmu_regs *regs)
{
	unsigned int cnt;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
	if (!(regs->pmc & ARMV7_PMNC_E))
		return;

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
	asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 1"
					: "=r"(regs->pmxevttype[cnt]));
		asm volatile("mrc p15, 0, %0, c9, c13, 2"
					: "=r"(regs->pmxevtcnt[cnt]));
	}
}

static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
					struct cpupmu_regs *regs)
{
	unsigned int cnt;
	if (!(regs->pmc & ARMV7_PMNC_E))
		return;

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
	asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
	asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mcr p15, 0, %0, c9, c13, 1"
					: : "r"(regs->pmxevttype[cnt]));
		asm volatile("mcr p15, 0, %0, c9, c13, 2"
					: : "r"(regs->pmxevtcnt[cnt]));
	}
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
}

static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try to use
	 * the event counters.
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

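/*
 * Illustrative flow for armv7pmu_get_event_idx() above: a request for
 * ARMV7_PERFCTR_CPU_CYCLES always lands on the dedicated cycle counter
 * (idx 0); every other event takes the first free bit in cpuc->used_mask,
 * starting at ARMV7_IDX_COUNTER0.
 */
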
/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

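/*
 * Example (illustrative): attr.exclude_user = 1 with everything else clear
 * yields config_base = ARMV7_EXCLUDE_USER | ARMV7_INCLUDE_HYP, so the
 * counter ignores PL0 but still counts PL1 and, where present, HYP mode.
 */
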
static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

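/*
 * The 0xFF passed to armpmu_map_event() above masks PERF_TYPE_RAW configs
 * down to the 8-bit event number expected by PMXEVTYPER
 * (ARMV7_EVTYPE_EVENT); see the generic armpmu code for the authoritative
 * behaviour of that last argument.
 */
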
static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
	cpu_pmu->enable		= armv7pmu_enable_event;
	cpu_pmu->disable	= armv7pmu_disable_event;
	cpu_pmu->read_counter	= armv7pmu_read_counter;
	cpu_pmu->write_counter	= armv7pmu_write_counter;
	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
	cpu_pmu->start		= armv7pmu_start;
	cpu_pmu->stop		= armv7pmu_stop;
	cpu_pmu->reset		= armv7pmu_reset;
	cpu_pmu->save_regs	= armv7pmu_save_regs;
	cpu_pmu->restore_regs	= armv7pmu_restore_regs;
	cpu_pmu->max_period	= (1LLU << 32) - 1;
}

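/*
 * armv7pmu_init() fills in only the callbacks common to all ARMv7 PMUs;
 * each CPU-specific init below still supplies the name, the event maps and
 * the counter count, and (on PMUv2 parts) the event filter hook.
 */
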
static u32 armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}

static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7_Cortex_A8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7_Cortex_A9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7_Cortex_A5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7_Cortex_A15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7_Cortex_A7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}
#else
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif	/* CONFIG_CPU_V7 */