perf: Trivial cleanup of duplicate code
arch/x86/kernel/cpu/perf_event_amd.c

#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

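/*
 * Generic hardware cache events mapped to AMD event-select/umask
 * codes (umask in bits 15:8, event select in bits 7:0). Following the
 * usual x86 perf convention, 0 means no suitable hardware event exists
 * for that combination and -1 means the operation itself is not
 * supported.
 */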
static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
                [ C(RESULT_MISS) ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
                [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
                [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(LL ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
                [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
                [ C(RESULT_MISS) ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
                [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
                [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
                [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x0080,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x0081,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
        return amd_perfmon_event_map[hw_event];
}

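/*
 * Set up the hardware configuration of an event: run the generic x86
 * setup first, then apply the AMD-specific host/guest-only filtering
 * bits and, for PERF_TYPE_RAW events, the raw config bits.
 */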
static int amd_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

        if (ret)
                return ret;

        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (event->attr.exclude_host && event->attr.exclude_guest)
                /*
                 * When HO == GO == 1 the hardware treats that as
                 * GO == HO == 0 and will count in both modes. We don't want
                 * to count in that case so we emulate no-counting by setting
                 * US = OS = 0.
                 */
                event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
                                      ARCH_PERFMON_EVENTSEL_OS);
        else if (event->attr.exclude_host)
                event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
        else if (event->attr.exclude_guest)
                event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;

        if (event->attr.type != PERF_TYPE_RAW)
                return 0;

        event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

        return 0;
}
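
/*
 * Note: AMD64_RAW_EVENT_MASK admits only the fields also exported via
 * amd_format_attr below (event select incl. bits 35:32, umask, edge,
 * inv, cmask); privilege and host/guest-only bits stay under kernel
 * control.
 */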

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
        return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

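/*
 * NB events carry event-select codes 0xEx/0xFx, i.e. bits 7:5 of the
 * low event-code byte all set.
 */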
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
        return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
        struct amd_nb *nb = cpuc->amd_nb;

        return nb && nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        int i;

        /*
         * only care about NB events
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return;

        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
         *
         * no race condition possible because event can only
         * be removed on one CPU at a time AND PMU is disabled
         * when we come here
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (nb->owners[i] == event) {
                        cmpxchg(nb->owners+i, event, NULL);
                        break;
                }
        }
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, HyperTransport
 * traffic. They are identified by an event code >= 0xe0.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the
 * same counters to host NB events, which is why we use atomic
 * ops. Some multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non NB events are not impacted by this restriction.
 */
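/*
 * For example, with two cores on one NB and four counters,
 * nb->owners[] acts as a small lock-free allocation table: a core
 * claims slot i with cmpxchg(nb->owners + i, NULL, event) and
 * releases it the same way in amd_put_event_constraints().
 */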
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        struct perf_event *old = NULL;
        int max = x86_pmu.num_counters;
        int i, j, k = -1;

        /*
         * if not NB event or no NB, then no constraints
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return &unconstrained;

        /*
         * detect if already present, if so reuse
         *
         * cannot merge with actual allocation
         * because of possible holes
         *
         * event can already be present yet not assigned (in hwc->idx)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
        for (i = 0; i < max; i++) {
                /*
                 * keep track of first free slot
                 */
                if (k == -1 && !nb->owners[i])
                        k = i;

                /* already present, reuse */
                if (nb->owners[i] == event)
                        goto done;
        }
        /*
         * not present, so grab a new slot
         * starting either at:
         */
        if (hwc->idx != -1) {
                /* previous assignment */
                i = hwc->idx;
        } else if (k != -1) {
                /* start from free slot found */
                i = k;
        } else {
                /*
                 * event not found, no slot found in
                 * first pass, try again from the
                 * beginning
                 */
                i = 0;
        }
        j = i;
        do {
                old = cmpxchg(nb->owners+i, NULL, event);
                if (!old)
                        break;
                if (++i == max)
                        i = 0;
        } while (i != j);
done:
        if (!old)
                return &nb->event_constraints[i];

        return &emptyconstraint;
}

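/*
 * Allocate a zeroed, node-local amd_nb structure for the given CPU and
 * set up one single-counter constraint per hardware counter.
 */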
static struct amd_nb *amd_alloc_nb(int cpu)
{
        struct amd_nb *nb;
        int i;

        nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
                          cpu_to_node(cpu));
        if (!nb)
                return NULL;

        nb->nb_id = -1;

        /*
         * initialize all possible NB constraints
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
}

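/*
 * CPU hotplug callbacks: cpu_prepare allocates the NB structure,
 * cpu_starting adopts the one already owned by an online core on the
 * same northbridge (queueing its own duplicate on kfree_on_online),
 * and cpu_dead frees it once the last core on the node is gone.
 */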
static int amd_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        WARN_ON_ONCE(cpuc->amd_nb);

        if (boot_cpu_data.x86_max_cores < 2)
                return NOTIFY_OK;

        cpuc->amd_nb = amd_alloc_nb(cpu);
        if (!cpuc->amd_nb)
                return NOTIFY_BAD;

        return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct amd_nb *nb;
        int i, nb_id;

        cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

        if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
                return;

        nb_id = amd_get_nb_id(cpu);
        WARN_ON_ONCE(nb_id == BAD_APICID);

        for_each_online_cpu(i) {
                nb = per_cpu(cpu_hw_events, i).amd_nb;
                if (WARN_ON_ONCE(!nb))
                        continue;

                if (nb->nb_id == nb_id) {
                        cpuc->kfree_on_online = cpuc->amd_nb;
                        cpuc->amd_nb = nb;
                        break;
                }
        }

        cpuc->amd_nb->nb_id = nb_id;
        cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
        struct cpu_hw_events *cpuhw;

        if (boot_cpu_data.x86_max_cores < 2)
                return;

        cpuhw = &per_cpu(cpu_hw_events, cpu);

        if (cpuhw->amd_nb) {
                struct amd_nb *nb = cpuhw->amd_nb;

                if (nb->nb_id == -1 || --nb->refcnt == 0)
                        kfree(nb);

                cpuhw->amd_nb = NULL;
        }
}

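/*
 * Event format attributes, exported under
 * /sys/bus/event_source/devices/cpu/format/. They let tools compose
 * raw events symbolically, e.g. (0x76 being CPU cycles per the event
 * map above):
 *
 *   perf stat -e cpu/event=0x76,umask=0x00/ -- sleep 1
 */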
PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");
PMU_FORMAT_ATTR(edge,  "config:18");
PMU_FORMAT_ATTR(inv,   "config:23");
PMU_FORMAT_ATTR(cmask, "config:24-31");

static struct attribute *amd_format_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask.attr,
        NULL,
};

static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = AMD64_NUM_COUNTERS,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,

        .format_attrs           = amd_format_attr,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK     0x000000F0ULL

#define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS         0x000000C0ULL
#define AMD_EVENT_DE            0x000000D0ULL
#define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000        FP      PERF_CTL[5:3]
 * 0x010        FP      PERF_CTL[5:3]
 * 0x020        LS      PERF_CTL[5:0]
 * 0x030        LS      PERF_CTL[5:0]
 * 0x040        DC      PERF_CTL[5:0]
 * 0x050        DC      PERF_CTL[5:0]
 * 0x060        CU      PERF_CTL[2:0]
 * 0x070        CU      PERF_CTL[2:0]
 * 0x080        IC/DE   PERF_CTL[2:0]
 * 0x090        IC/DE   PERF_CTL[2:0]
 * 0x0A0        ---
 * 0x0B0        ---
 * 0x0C0        EX/LS   PERF_CTL[5:0]
 * 0x0D0        DE      PERF_CTL[2:0]
 * 0x0E0        NB      NB_PERF_CTL[3:0]
 * 0x0F0        NB      NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003        FP      PERF_CTL[3]
 * 0x004        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B        FP      PERF_CTL[3]
 * 0x00D        FP      PERF_CTL[3]
 * 0x023        DE      PERF_CTL[2:0]
 * 0x02D        LS      PERF_CTL[3]
 * 0x02E        LS      PERF_CTL[3,0]
 * 0x043        CU      PERF_CTL[2:0]
 * 0x045        CU      PERF_CTL[2:0]
 * 0x046        CU      PERF_CTL[2:0]
 * 0x054        CU      PERF_CTL[2:0]
 * 0x055        CU      PERF_CTL[2:0]
 * 0x08F        IC      PERF_CTL[0]
 * 0x187        DE      PERF_CTL[0]
 * 0x188        DE      PERF_CTL[0]
 * 0x0DB        EX      PERF_CTL[5:0]
 * 0x0DC        LS      PERF_CTL[5:0]
 * 0x0DD        LS      PERF_CTL[5:0]
 * 0x0DE        LS      PERF_CTL[5:0]
 * 0x0DF        LS      PERF_CTL[5:0]
 * 0x1D6        EX      PERF_CTL[5:0]
 * 0x1D8        EX      PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */

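/*
 * The constraint masks below select which of PERF_CTL[5:0] may host an
 * event: e.g. 0x07 = counters 0-2, 0x09 = counters 3 and 0 (an
 * overlapping pair), 0x38 = counters 3-5.
 */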
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int event_code = amd_get_event_code(hwc);

        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
                case 0x000:
                        if (!(hwc->config & 0x0000F000ULL))
                                break;
                        if (!(hwc->config & 0x00000F00ULL))
                                break;
                        return &amd_f15_PMC3;
                case 0x004:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                break;
                        return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
                }
                return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
                switch (event_code) {
                case 0x023:
                case 0x043:
                case 0x045:
                case 0x046:
                case 0x054:
                case 0x055:
                        return &amd_f15_PMC20;
                case 0x02D:
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
                default:
                        return &amd_f15_PMC50;
                }
        case AMD_EVENT_CU:
        case AMD_EVENT_IC_DE:
        case AMD_EVENT_DE:
                switch (event_code) {
                case 0x08F:
                case 0x187:
                case 0x188:
                        return &amd_f15_PMC0;
                case 0x0DB ... 0x0DF:
                case 0x1D6:
                case 0x1D8:
                        return &amd_f15_PMC50;
                default:
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
                /* not yet implemented */
                return &emptyconstraint;
        default:
                return &emptyconstraint;
        }
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
        .name                   = "AMD Family 15h",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_F15H_PERF_CTL,
        .perfctr                = MSR_F15H_PERF_CTR,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = AMD64_NUM_COUNTERS_F15H,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints_f15h,
        /* northbridge counters not yet implemented: */
#if 0
        .put_event_constraints  = amd_put_event_constraints,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_dead               = amd_pmu_cpu_dead,
#endif
        .cpu_starting           = amd_pmu_cpu_starting,
        .format_attrs           = amd_format_attr,
};

__init int amd_pmu_init(void)
{
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;

        /*
         * If core performance counter extensions exist, it must be
         * family 15h, otherwise fail. See x86_pmu_addr_offset().
         */
        switch (boot_cpu_data.x86) {
        case 0x15:
                if (!cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu_f15h;
                break;
        default:
                if (cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu;
                break;
        }

        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        return 0;
}

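/*
 * amd_pmu_enable_virt()/amd_pmu_disable_virt() are exported for the
 * SVM host code to flip guest-only counting on and off; both reload
 * every counter so the new perf_ctr_virt_mask takes effect.
 */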
void amd_pmu_enable_virt(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        cpuc->perf_ctr_virt_mask = 0;

        /* Reload all events */
        x86_pmu_disable_all();
        x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        /*
         * We only mask out the Host-only bit so that host-only counting works
         * when SVM is disabled. If someone sets up a guest-only counter when
         * SVM is disabled the Guest-only bit still gets set and the counter
         * will not count anything.
         */
        cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

        /* Reload all events */
        x86_pmu_disable_all();
        x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);