#ifdef CONFIG_CPU_SUP_AMD

static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
                [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
                [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
                [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
                [ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
                [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
                [ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x0080,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x0081,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
        return amd_perfmon_event_map[hw_event];
}
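
/*
 * Raw events are passed through with only the bits covered by
 * AMD64_RAW_EVENT_MASK retained in the hardware config.
 */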
static int amd_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

        if (ret)
                return ret;

        if (event->attr.type != PERF_TYPE_RAW)
                return 0;

        event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

        return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
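/*
 * The event code is 12 bits wide: config bits [35:32] supply code
 * bits [11:8], and config bits [7:0] supply code bits [7:0].
 */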
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
        return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
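
/*
 * NB events are detected by bits [7:5] of the event code all being
 * set, i.e. a low event-code byte in the range 0xE0-0xFF.
 */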
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
        return (hwc->config & 0xe0) == 0xe0;
}
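
/*
 * True once this CPU has been attached to a NorthBridge structure;
 * nb_id is assigned in amd_pmu_cpu_starting().
 */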
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
        struct amd_nb *nb = cpuc->amd_nb;

        return nb && nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        int i;

        /*
         * only care about NB events
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return;

        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
         *
         * no race condition possible because event can only
         * be removed on one CPU at a time AND PMU is disabled
         * when we come here
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (nb->owners[i] == event) {
                        cmpxchg(nb->owners+i, event, NULL);
                        break;
                }
        }
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, Hypertransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the
 * same counters to host NB events, which is why we use atomic
 * ops. Some multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        struct perf_event *old = NULL;
        int max = x86_pmu.num_counters;
        int i, j, k = -1;

        /*
         * if not NB event or no NB, then no constraints
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return &unconstrained;

        /*
         * detect if already present, if so reuse
         *
         * cannot merge with actual allocation
         * because of possible holes
         *
         * event can already be present yet not assigned (in hwc->idx)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
        for (i = 0; i < max; i++) {
                /*
                 * keep track of first free slot
                 */
                if (k == -1 && !nb->owners[i])
                        k = i;

                /* already present, reuse */
                if (nb->owners[i] == event)
                        goto done;
        }
        /*
         * not present, so grab a new slot
         * starting either at:
         */
        if (hwc->idx != -1) {
                /* previous assignment */
                i = hwc->idx;
        } else if (k != -1) {
                /* start from free slot found */
                i = k;
        } else {
                /*
                 * event not found, no slot found in
                 * first pass, try again from the
                 * beginning
                 */
                i = 0;
        }
        j = i;
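        /*
         * Atomically claim the slot; on contention, advance to the
         * next slot, wrapping around until the starting index is
         * reached again.
         */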
        do {
                old = cmpxchg(nb->owners+i, NULL, event);
                if (!old)
                        break;
                if (++i == max)
                        i = 0;
        } while (i != j);
done:
        if (!old)
                return &nb->event_constraints[i];

        return &emptyconstraint;
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
        struct amd_nb *nb;
        int i;

        nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
                          cpu_to_node(cpu));
        if (!nb)
                return NULL;

        nb->nb_id = -1;

        /*
         * initialize all possible NB constraints
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
}
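
/*
 * Allocate a NorthBridge structure for this CPU;
 * amd_pmu_cpu_starting() may later replace it with one already
 * shared by another CPU on the same node.
 */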
static int amd_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        WARN_ON_ONCE(cpuc->amd_nb);

        if (boot_cpu_data.x86_max_cores < 2)
                return NOTIFY_OK;

        cpuc->amd_nb = amd_alloc_nb(cpu);
        if (!cpuc->amd_nb)
                return NOTIFY_BAD;

        return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct amd_nb *nb;
        int i, nb_id;

        if (boot_cpu_data.x86_max_cores < 2)
                return;

        nb_id = amd_get_nb_id(cpu);
        WARN_ON_ONCE(nb_id == BAD_APICID);

        for_each_online_cpu(i) {
                nb = per_cpu(cpu_hw_events, i).amd_nb;
                if (WARN_ON_ONCE(!nb))
                        continue;
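                /*
                 * Another online CPU on the same node already owns the
                 * NB structure: share it, and hand the structure
                 * allocated in amd_pmu_cpu_prepare() to
                 * kfree_on_online for later freeing.
                 */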
                if (nb->nb_id == nb_id) {
                        cpuc->kfree_on_online = cpuc->amd_nb;
                        cpuc->amd_nb = nb;
                        break;
                }
        }

        cpuc->amd_nb->nb_id = nb_id;
        cpuc->amd_nb->refcnt++;
}
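
/*
 * Drop this CPU's reference on the (possibly shared) NB structure
 * and free it once the last reference is gone.
 */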
static void amd_pmu_cpu_dead(int cpu)
{
        struct cpu_hw_events *cpuhw;

        if (boot_cpu_data.x86_max_cores < 2)
                return;

        cpuhw = &per_cpu(cpu_hw_events, cpu);

        if (cpuhw->amd_nb) {
                struct amd_nb *nb = cpuhw->amd_nb;

                if (nb->nb_id == -1 || --nb->refcnt == 0)
                        kfree(nb);

                cpuhw->amd_nb = NULL;
        }
}

static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = 4,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK     0x000000F0ULL

#define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS         0x000000C0ULL
#define AMD_EVENT_DE            0x000000D0ULL
#define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000        FP      PERF_CTL[5:3]
 * 0x010        FP      PERF_CTL[5:3]
 * 0x020        LS      PERF_CTL[5:0]
 * 0x030        LS      PERF_CTL[5:0]
 * 0x040        DC      PERF_CTL[5:0]
 * 0x050        DC      PERF_CTL[5:0]
 * 0x060        CU      PERF_CTL[2:0]
 * 0x070        CU      PERF_CTL[2:0]
 * 0x080        IC/DE   PERF_CTL[2:0]
 * 0x090        IC/DE   PERF_CTL[2:0]
 * 0x0A0        ---
 * 0x0B0        ---
 * 0x0C0        EX/LS   PERF_CTL[5:0]
 * 0x0D0        DE      PERF_CTL[2:0]
 * 0x0E0        NB      NB_PERF_CTL[3:0]
 * 0x0F0        NB      NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003        FP      PERF_CTL[3]
 * 0x004        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B        FP      PERF_CTL[3]
 * 0x00D        FP      PERF_CTL[3]
 * 0x023        DE      PERF_CTL[2:0]
 * 0x02D        LS      PERF_CTL[3]
 * 0x02E        LS      PERF_CTL[3,0]
 * 0x043        CU      PERF_CTL[2:0]
 * 0x045        CU      PERF_CTL[2:0]
 * 0x046        CU      PERF_CTL[2:0]
 * 0x054        CU      PERF_CTL[2:0]
 * 0x055        CU      PERF_CTL[2:0]
 * 0x08F        IC      PERF_CTL[0]
 * 0x187        DE      PERF_CTL[0]
 * 0x188        DE      PERF_CTL[0]
 * 0x0DB        EX      PERF_CTL[5:0]
 * 0x0DC        LS      PERF_CTL[5:0]
 * 0x0DD        LS      PERF_CTL[5:0]
 * 0x0DE        LS      PERF_CTL[5:0]
 * 0x0DF        LS      PERF_CTL[5:0]
 * 0x1D6        EX      PERF_CTL[5:0]
 * 0x1D8        EX      PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */
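
/*
 * Counter bitmasks encoded in the constraints below:
 *   PMC0  -> counter 0        (0x01)
 *   PMC20 -> counters 2-0     (0x07)
 *   PMC3  -> counter 3        (0x08)
 *   PMC30 -> counters 3 and 0 (0x09)
 *   PMC50 -> counters 5-0     (0x3F)
 *   PMC53 -> counters 5-3     (0x38)
 */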
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int event_code = amd_get_event_code(hwc);

        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
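                /*
                 * Per the (*) exceptions above: events 0x000 and 0x004
                 * fall back to all FPU counters (PERF_CTL[5:3]) unless
                 * their umask restricts them to PERF_CTL[3].
                 */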
                case 0x000:
                        if (!(hwc->config & 0x0000F000ULL))
                                break;
                        if (!(hwc->config & 0x00000F00ULL))
                                break;
                        return &amd_f15_PMC3;
                case 0x004:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                break;
                        return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
                }
                return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
                switch (event_code) {
                case 0x023:
                case 0x043:
                case 0x045:
                case 0x046:
                case 0x054:
                case 0x055:
                        return &amd_f15_PMC20;
                case 0x02D:
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
                default:
                        return &amd_f15_PMC50;
                }
        case AMD_EVENT_CU:
        case AMD_EVENT_IC_DE:
        case AMD_EVENT_DE:
                switch (event_code) {
                case 0x08F:
                case 0x187:
                case 0x188:
                        return &amd_f15_PMC0;
                case 0x0DB ... 0x0DF:
                case 0x1D6:
                case 0x1D8:
                        return &amd_f15_PMC50;
                default:
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
                /* not yet implemented */
                return &emptyconstraint;
        default:
                return &emptyconstraint;
        }
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
        .name                   = "AMD Family 15h",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_F15H_PERF_CTL,
        .perfctr                = MSR_F15H_PERF_CTR,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = 6,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints_f15h,
        /* northbridge counters not yet implemented: */
#if 0
        .put_event_constraints  = amd_put_event_constraints,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
#endif
};

static __init int amd_pmu_init(void)
{
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;

        /*
         * If core performance counter extensions exist, the CPU must
         * be family 15h, otherwise fail. See x86_pmu_addr_offset().
         */
        switch (boot_cpu_data.x86) {
        case 0x15:
                if (!cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu_f15h;
                break;
        default:
                if (cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu;
                break;
        }

        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        return 0;
}

#else /* CONFIG_CPU_SUP_AMD */

static int amd_pmu_init(void)
{
        return 0;
}

#endif