/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	/*
	 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
	 * siblings; disable these events because they can corrupt unrelated
	 * counters.
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");
struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};
struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
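/*
 * The encodings above follow the PERFEVTSEL layout: event select in
 * bits 7:0, unit mask in bits 15:8. For example, 0x412e is umask 0x41
 * on event 0x2e, the architectural LONGEST_LAT_CACHE.MISS ("LLC Misses")
 * event.
 */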
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
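/*
 * Example of how these compose: SNB_DMND_READ|SNB_L3_ACCESS expands to
 * SNB_DMND_DATA_RD|SNB_LLC_DATA_RD|SNB_RESP_ANY, i.e. bits 0, 7 and 16
 * (0x10081), which is the value programmed into MSR_OFFCORE_RSP_x.
 */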
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */
#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}
static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

	/*
	 * must disable before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
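/*
 * Example: for fixed counter 1 (idx == 1) counting both ring 0 and
 * ring 3 with PMI enabled, bits == 0x8|0x2|0x1 == 0xb, so the nibble
 * 0xb lands in bits 7:4 of MSR_ARCH_PERFMON_FIXED_CTR_CTRL while the
 * other counters' nibbles are preserved via mask.
 */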
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}
	/*
	 * must be enabled before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler. As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
	 * and clear the bit.
	 */
	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
		if (!status)
			goto done;
	}

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
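/*
 * For reference, the GLOBAL_STATUS layout consumed above: the generic
 * counters occupy the low bits, the fixed counters start at bit 32,
 * bit 62 flags a DS/PEBS buffer overflow and bit 63 is CondChgd.
 */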
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
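/*
 * E.g. an offcore event scheduled onto the alternate register has its
 * event select rewritten from OFFCORE_RESPONSE_0 (0x01b7) to
 * OFFCORE_RESPONSE_1 (0x01bb), so that the config matches the MSR the
 * event will actually be programmed to use.
 */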
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If it's a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling. reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
*
1406 intel_shared_regs_constraints(struct cpu_hw_events
*cpuc
,
1407 struct perf_event
*event
)
1409 struct event_constraint
*c
= NULL
, *d
;
1410 struct hw_perf_event_extra
*xreg
, *breg
;
1412 xreg
= &event
->hw
.extra_reg
;
1413 if (xreg
->idx
!= EXTRA_REG_NONE
) {
1414 c
= __intel_shared_reg_get_constraints(cpuc
, event
, xreg
);
1415 if (c
== &emptyconstraint
)
1418 breg
= &event
->hw
.branch_reg
;
1419 if (breg
->idx
!= EXTRA_REG_NONE
) {
1420 d
= __intel_shared_reg_get_constraints(cpuc
, event
, breg
);
1421 if (d
== &emptyconstraint
) {
1422 __intel_shared_reg_put_constraints(cpuc
, xreg
);
1429 struct event_constraint
*
1430 x86_get_event_constraints(struct cpu_hw_events
*cpuc
, struct perf_event
*event
)
1432 struct event_constraint
*c
;
1434 if (x86_pmu
.event_constraints
) {
1435 for_each_event_constraint(c
, x86_pmu
.event_constraints
) {
1436 if ((event
->hw
.config
& c
->cmask
) == c
->code
) {
1437 /* hw.flags zeroed at initialization */
1438 event
->hw
.flags
|= c
->flags
;
1444 return &unconstrained
;
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	event->hw.flags = 0;
	intel_put_shared_regs_event_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
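/*
 * Worked example of the encoding above: X86_CONFIG(.event=0xc0, .inv=1,
 * .cmask=16) puts 0xc0 in bits 7:0, sets the invert bit (bit 23) and
 * places 16 in the counter-mask field (bits 31:24), i.e. 0x108000c0.
 */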
static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retires
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;

	*nr = 2;
	return arr;
}
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}
static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}
static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}
static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}

	return regs;
}
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
)
1746 struct cpu_hw_events
*cpuc
= &per_cpu(cpu_hw_events
, cpu
);
1747 int core_id
= topology_core_id(cpu
);
1750 init_debug_store_on_cpu(cpu
);
1752 * Deal with CPUs that don't clear their LBRs on power-up.
1754 intel_pmu_lbr_reset();
1756 cpuc
->lbr_sel
= NULL
;
1758 if (!cpuc
->shared_regs
)
1761 if (!(x86_pmu
.er_flags
& ERF_NO_HT_SHARING
)) {
1762 for_each_cpu(i
, topology_thread_cpumask(cpu
)) {
1763 struct intel_shared_regs
*pc
;
1765 pc
= per_cpu(cpu_hw_events
, i
).shared_regs
;
1766 if (pc
&& pc
->core_id
== core_id
) {
1767 cpuc
->kfree_on_online
= cpuc
->shared_regs
;
1768 cpuc
->shared_regs
= pc
;
1772 cpuc
->shared_regs
->core_id
= core_id
;
1773 cpuc
->shared_regs
->refcnt
++;
1776 if (x86_pmu
.lbr_sel_map
)
1777 cpuc
->lbr_sel
= &cpuc
->shared_regs
->regs
[EXTRA_REG_LBR
];
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, so we need to
	 * flush it on context switch.
	 * For now, we simply reset it.
	 */
	intel_pmu_lbr_reset();
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");
static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	&format_attr_ldlat.attr, /* PEBS load latency */
	NULL,
};
static __initconst const struct x86_pmu intel_pmu = {
	.name			= "core",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
};
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 42: /* SNB */
		rev = 0x28;
		break;

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock..
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}
static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES,		"cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS,		"instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES,		"bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES,	"cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES,		"cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS,	"branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES,		"branch misses" },
};
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that CPUID reports as not present */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
	case 38: /* Lincroft */
	case 39: /* Penwell */
	case 53: /* Cloverview */
	case 54: /* Cedarview */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 45)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;

	case 58: /* IvyBridge */
	case 62: /* IvyBridge EP */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different than SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 62)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask == X86_RAW_EVENT_MASK
			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			}
			c->idxmsk64 &=
				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
			c->weight = hweight64(c->idxmsk64);
		}
	}

	return 0;
}