/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
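/*
 * Illustrative note (editorial, not in the original source): each real entry
 * above packs a raw perfmon encoding as (umask << 8) | event_select, e.g.
 * 0x412e is event 0x2e with umask 0x41 (LLC misses) and 0x4f2e is event 0x2e
 * with umask 0x4f (LLC references). REF_CPU_CYCLES (0x0300) is a
 * pseudo-encoding: it is not a programmable event select but is resolved to
 * fixed counter 2 (CPU_CLK_UNHALTED.REF), as the fixed-event constraints
 * below show.
 */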
36 static struct event_constraint intel_core_event_constraints
[] __read_mostly
=
38 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
39 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
40 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
41 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
42 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
43 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
47 static struct event_constraint intel_core2_event_constraints
[] __read_mostly
=
49 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
50 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
51 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
52 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
53 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
54 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
55 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
56 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
57 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
58 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
59 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
60 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
61 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
65 static struct event_constraint intel_nehalem_event_constraints
[] __read_mostly
=
67 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
68 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
69 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
70 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
71 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
72 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
73 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
74 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
75 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
76 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
77 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
81 static struct extra_reg intel_nehalem_extra_regs
[] __read_mostly
=
83 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0
, 0xffff, RSP_0
),
87 static struct event_constraint intel_westmere_event_constraints
[] __read_mostly
=
89 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
90 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
91 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
92 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
93 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
94 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
95 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
99 static struct event_constraint intel_snb_event_constraints
[] __read_mostly
=
101 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
102 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
103 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
104 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
105 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
106 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
107 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
108 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
109 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
110 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
114 static struct event_constraint intel_ivb_event_constraints
[] __read_mostly
=
116 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
117 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
118 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
119 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
121 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
122 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
123 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
124 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
125 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
126 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
127 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
128 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
129 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
130 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
131 INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
135 static struct extra_reg intel_westmere_extra_regs
[] __read_mostly
=
137 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0
, 0xffff, RSP_0
),
138 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1
, 0xffff, RSP_1
),
142 static struct event_constraint intel_v1_event_constraints
[] __read_mostly
=
147 static struct event_constraint intel_gen_event_constraints
[] __read_mostly
=
149 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
150 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
151 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
155 static struct extra_reg intel_snb_extra_regs
[] __read_mostly
= {
156 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0
, 0x3f807f8fffull
, RSP_0
),
157 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1
, 0x3f807f8fffull
, RSP_1
),
161 static struct extra_reg intel_snbep_extra_regs
[] __read_mostly
= {
162 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0
, 0x3fffff8fffull
, RSP_0
),
163 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1
, 0x3fffff8fffull
, RSP_1
),
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
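/*
 * Illustrative note (editorial, not in the original source): these composite
 * masks are what the hw-cache extra-reg table below ORs together to build an
 * OFFCORE_RESPONSE filter. For example, an LL "read access" is encoded as
 * SNB_DMND_READ|SNB_L3_ACCESS, i.e. demand plus LLC-prefetch data reads
 * combined with the "any response" bit, while the matching "read miss" swaps
 * in SNB_L3_MISS so only requests that had to go to DRAM (local or remote)
 * or got no data at all are counted.
 */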
215 static __initconst
const u64 snb_hw_cache_extra_regs
216 [PERF_COUNT_HW_CACHE_MAX
]
217 [PERF_COUNT_HW_CACHE_OP_MAX
]
218 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
222 [ C(RESULT_ACCESS
) ] = SNB_DMND_READ
|SNB_L3_ACCESS
,
223 [ C(RESULT_MISS
) ] = SNB_DMND_READ
|SNB_L3_MISS
,
226 [ C(RESULT_ACCESS
) ] = SNB_DMND_WRITE
|SNB_L3_ACCESS
,
227 [ C(RESULT_MISS
) ] = SNB_DMND_WRITE
|SNB_L3_MISS
,
229 [ C(OP_PREFETCH
) ] = {
230 [ C(RESULT_ACCESS
) ] = SNB_DMND_PREFETCH
|SNB_L3_ACCESS
,
231 [ C(RESULT_MISS
) ] = SNB_DMND_PREFETCH
|SNB_L3_MISS
,
236 [ C(RESULT_ACCESS
) ] = SNB_DMND_READ
|SNB_DRAM_ANY
,
237 [ C(RESULT_MISS
) ] = SNB_DMND_READ
|SNB_DRAM_REMOTE
,
240 [ C(RESULT_ACCESS
) ] = SNB_DMND_WRITE
|SNB_DRAM_ANY
,
241 [ C(RESULT_MISS
) ] = SNB_DMND_WRITE
|SNB_DRAM_REMOTE
,
243 [ C(OP_PREFETCH
) ] = {
244 [ C(RESULT_ACCESS
) ] = SNB_DMND_PREFETCH
|SNB_DRAM_ANY
,
245 [ C(RESULT_MISS
) ] = SNB_DMND_PREFETCH
|SNB_DRAM_REMOTE
,
250 static __initconst
const u64 snb_hw_cache_event_ids
251 [PERF_COUNT_HW_CACHE_MAX
]
252 [PERF_COUNT_HW_CACHE_OP_MAX
]
253 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
257 [ C(RESULT_ACCESS
) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
258 [ C(RESULT_MISS
) ] = 0x0151, /* L1D.REPLACEMENT */
261 [ C(RESULT_ACCESS
) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
262 [ C(RESULT_MISS
) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
264 [ C(OP_PREFETCH
) ] = {
265 [ C(RESULT_ACCESS
) ] = 0x0,
266 [ C(RESULT_MISS
) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
271 [ C(RESULT_ACCESS
) ] = 0x0,
272 [ C(RESULT_MISS
) ] = 0x0280, /* ICACHE.MISSES */
275 [ C(RESULT_ACCESS
) ] = -1,
276 [ C(RESULT_MISS
) ] = -1,
278 [ C(OP_PREFETCH
) ] = {
279 [ C(RESULT_ACCESS
) ] = 0x0,
280 [ C(RESULT_MISS
) ] = 0x0,
285 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
286 [ C(RESULT_ACCESS
) ] = 0x01b7,
287 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
288 [ C(RESULT_MISS
) ] = 0x01b7,
291 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
292 [ C(RESULT_ACCESS
) ] = 0x01b7,
293 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
294 [ C(RESULT_MISS
) ] = 0x01b7,
296 [ C(OP_PREFETCH
) ] = {
297 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
298 [ C(RESULT_ACCESS
) ] = 0x01b7,
299 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
300 [ C(RESULT_MISS
) ] = 0x01b7,
305 [ C(RESULT_ACCESS
) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
306 [ C(RESULT_MISS
) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
309 [ C(RESULT_ACCESS
) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
310 [ C(RESULT_MISS
) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
312 [ C(OP_PREFETCH
) ] = {
313 [ C(RESULT_ACCESS
) ] = 0x0,
314 [ C(RESULT_MISS
) ] = 0x0,
319 [ C(RESULT_ACCESS
) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
320 [ C(RESULT_MISS
) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
323 [ C(RESULT_ACCESS
) ] = -1,
324 [ C(RESULT_MISS
) ] = -1,
326 [ C(OP_PREFETCH
) ] = {
327 [ C(RESULT_ACCESS
) ] = -1,
328 [ C(RESULT_MISS
) ] = -1,
333 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
334 [ C(RESULT_MISS
) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
337 [ C(RESULT_ACCESS
) ] = -1,
338 [ C(RESULT_MISS
) ] = -1,
340 [ C(OP_PREFETCH
) ] = {
341 [ C(RESULT_ACCESS
) ] = -1,
342 [ C(RESULT_MISS
) ] = -1,
347 [ C(RESULT_ACCESS
) ] = 0x01b7,
348 [ C(RESULT_MISS
) ] = 0x01b7,
351 [ C(RESULT_ACCESS
) ] = 0x01b7,
352 [ C(RESULT_MISS
) ] = 0x01b7,
354 [ C(OP_PREFETCH
) ] = {
355 [ C(RESULT_ACCESS
) ] = 0x01b7,
356 [ C(RESULT_MISS
) ] = 0x01b7,
362 static __initconst
const u64 westmere_hw_cache_event_ids
363 [PERF_COUNT_HW_CACHE_MAX
]
364 [PERF_COUNT_HW_CACHE_OP_MAX
]
365 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
369 [ C(RESULT_ACCESS
) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
370 [ C(RESULT_MISS
) ] = 0x0151, /* L1D.REPL */
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
376 [ C(OP_PREFETCH
) ] = {
377 [ C(RESULT_ACCESS
) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
378 [ C(RESULT_MISS
) ] = 0x024e, /* L1D_PREFETCH.MISS */
383 [ C(RESULT_ACCESS
) ] = 0x0380, /* L1I.READS */
384 [ C(RESULT_MISS
) ] = 0x0280, /* L1I.MISSES */
387 [ C(RESULT_ACCESS
) ] = -1,
388 [ C(RESULT_MISS
) ] = -1,
390 [ C(OP_PREFETCH
) ] = {
391 [ C(RESULT_ACCESS
) ] = 0x0,
392 [ C(RESULT_MISS
) ] = 0x0,
397 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
398 [ C(RESULT_ACCESS
) ] = 0x01b7,
399 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
400 [ C(RESULT_MISS
) ] = 0x01b7,
403 * Use RFO, not WRITEBACK, because a write miss would typically occur
407 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
408 [ C(RESULT_ACCESS
) ] = 0x01b7,
409 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
410 [ C(RESULT_MISS
) ] = 0x01b7,
412 [ C(OP_PREFETCH
) ] = {
413 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
414 [ C(RESULT_ACCESS
) ] = 0x01b7,
415 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
416 [ C(RESULT_MISS
) ] = 0x01b7,
421 [ C(RESULT_ACCESS
) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
422 [ C(RESULT_MISS
) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
428 [ C(OP_PREFETCH
) ] = {
429 [ C(RESULT_ACCESS
) ] = 0x0,
430 [ C(RESULT_MISS
) ] = 0x0,
435 [ C(RESULT_ACCESS
) ] = 0x01c0, /* INST_RETIRED.ANY_P */
436 [ C(RESULT_MISS
) ] = 0x0185, /* ITLB_MISSES.ANY */
439 [ C(RESULT_ACCESS
) ] = -1,
440 [ C(RESULT_MISS
) ] = -1,
442 [ C(OP_PREFETCH
) ] = {
443 [ C(RESULT_ACCESS
) ] = -1,
444 [ C(RESULT_MISS
) ] = -1,
449 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
450 [ C(RESULT_MISS
) ] = 0x03e8, /* BPU_CLEARS.ANY */
453 [ C(RESULT_ACCESS
) ] = -1,
454 [ C(RESULT_MISS
) ] = -1,
456 [ C(OP_PREFETCH
) ] = {
457 [ C(RESULT_ACCESS
) ] = -1,
458 [ C(RESULT_MISS
) ] = -1,
463 [ C(RESULT_ACCESS
) ] = 0x01b7,
464 [ C(RESULT_MISS
) ] = 0x01b7,
467 [ C(RESULT_ACCESS
) ] = 0x01b7,
468 [ C(RESULT_MISS
) ] = 0x01b7,
470 [ C(OP_PREFETCH
) ] = {
471 [ C(RESULT_ACCESS
) ] = 0x01b7,
472 [ C(RESULT_MISS
) ] = 0x01b7,
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */
482 #define NHM_DMND_DATA_RD (1 << 0)
483 #define NHM_DMND_RFO (1 << 1)
484 #define NHM_DMND_IFETCH (1 << 2)
485 #define NHM_DMND_WB (1 << 3)
486 #define NHM_PF_DATA_RD (1 << 4)
487 #define NHM_PF_DATA_RFO (1 << 5)
488 #define NHM_PF_IFETCH (1 << 6)
489 #define NHM_OFFCORE_OTHER (1 << 7)
490 #define NHM_UNCORE_HIT (1 << 8)
491 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
492 #define NHM_OTHER_CORE_HITM (1 << 10)
494 #define NHM_REMOTE_CACHE_FWD (1 << 12)
495 #define NHM_REMOTE_DRAM (1 << 13)
496 #define NHM_LOCAL_DRAM (1 << 14)
497 #define NHM_NON_DRAM (1 << 15)
499 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
500 #define NHM_REMOTE (NHM_REMOTE_DRAM)
502 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
503 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
504 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
506 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
507 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
508 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
510 static __initconst
const u64 nehalem_hw_cache_extra_regs
511 [PERF_COUNT_HW_CACHE_MAX
]
512 [PERF_COUNT_HW_CACHE_OP_MAX
]
513 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
517 [ C(RESULT_ACCESS
) ] = NHM_DMND_READ
|NHM_L3_ACCESS
,
518 [ C(RESULT_MISS
) ] = NHM_DMND_READ
|NHM_L3_MISS
,
521 [ C(RESULT_ACCESS
) ] = NHM_DMND_WRITE
|NHM_L3_ACCESS
,
522 [ C(RESULT_MISS
) ] = NHM_DMND_WRITE
|NHM_L3_MISS
,
524 [ C(OP_PREFETCH
) ] = {
525 [ C(RESULT_ACCESS
) ] = NHM_DMND_PREFETCH
|NHM_L3_ACCESS
,
526 [ C(RESULT_MISS
) ] = NHM_DMND_PREFETCH
|NHM_L3_MISS
,
531 [ C(RESULT_ACCESS
) ] = NHM_DMND_READ
|NHM_LOCAL
|NHM_REMOTE
,
532 [ C(RESULT_MISS
) ] = NHM_DMND_READ
|NHM_REMOTE
,
535 [ C(RESULT_ACCESS
) ] = NHM_DMND_WRITE
|NHM_LOCAL
|NHM_REMOTE
,
536 [ C(RESULT_MISS
) ] = NHM_DMND_WRITE
|NHM_REMOTE
,
538 [ C(OP_PREFETCH
) ] = {
539 [ C(RESULT_ACCESS
) ] = NHM_DMND_PREFETCH
|NHM_LOCAL
|NHM_REMOTE
,
540 [ C(RESULT_MISS
) ] = NHM_DMND_PREFETCH
|NHM_REMOTE
,
545 static __initconst
const u64 nehalem_hw_cache_event_ids
546 [PERF_COUNT_HW_CACHE_MAX
]
547 [PERF_COUNT_HW_CACHE_OP_MAX
]
548 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
552 [ C(RESULT_ACCESS
) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
553 [ C(RESULT_MISS
) ] = 0x0151, /* L1D.REPL */
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
559 [ C(OP_PREFETCH
) ] = {
560 [ C(RESULT_ACCESS
) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
561 [ C(RESULT_MISS
) ] = 0x024e, /* L1D_PREFETCH.MISS */
566 [ C(RESULT_ACCESS
) ] = 0x0380, /* L1I.READS */
567 [ C(RESULT_MISS
) ] = 0x0280, /* L1I.MISSES */
570 [ C(RESULT_ACCESS
) ] = -1,
571 [ C(RESULT_MISS
) ] = -1,
573 [ C(OP_PREFETCH
) ] = {
574 [ C(RESULT_ACCESS
) ] = 0x0,
575 [ C(RESULT_MISS
) ] = 0x0,
580 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
581 [ C(RESULT_ACCESS
) ] = 0x01b7,
582 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
583 [ C(RESULT_MISS
) ] = 0x01b7,
586 * Use RFO, not WRITEBACK, because a write miss would typically occur
590 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
591 [ C(RESULT_ACCESS
) ] = 0x01b7,
592 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
593 [ C(RESULT_MISS
) ] = 0x01b7,
595 [ C(OP_PREFETCH
) ] = {
596 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
597 [ C(RESULT_ACCESS
) ] = 0x01b7,
598 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
599 [ C(RESULT_MISS
) ] = 0x01b7,
604 [ C(RESULT_ACCESS
) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
605 [ C(RESULT_MISS
) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
608 [ C(RESULT_ACCESS
) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
609 [ C(RESULT_MISS
) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
611 [ C(OP_PREFETCH
) ] = {
612 [ C(RESULT_ACCESS
) ] = 0x0,
613 [ C(RESULT_MISS
) ] = 0x0,
618 [ C(RESULT_ACCESS
) ] = 0x01c0, /* INST_RETIRED.ANY_P */
619 [ C(RESULT_MISS
) ] = 0x20c8, /* ITLB_MISS_RETIRED */
622 [ C(RESULT_ACCESS
) ] = -1,
623 [ C(RESULT_MISS
) ] = -1,
625 [ C(OP_PREFETCH
) ] = {
626 [ C(RESULT_ACCESS
) ] = -1,
627 [ C(RESULT_MISS
) ] = -1,
632 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
633 [ C(RESULT_MISS
) ] = 0x03e8, /* BPU_CLEARS.ANY */
636 [ C(RESULT_ACCESS
) ] = -1,
637 [ C(RESULT_MISS
) ] = -1,
639 [ C(OP_PREFETCH
) ] = {
640 [ C(RESULT_ACCESS
) ] = -1,
641 [ C(RESULT_MISS
) ] = -1,
646 [ C(RESULT_ACCESS
) ] = 0x01b7,
647 [ C(RESULT_MISS
) ] = 0x01b7,
650 [ C(RESULT_ACCESS
) ] = 0x01b7,
651 [ C(RESULT_MISS
) ] = 0x01b7,
653 [ C(OP_PREFETCH
) ] = {
654 [ C(RESULT_ACCESS
) ] = 0x01b7,
655 [ C(RESULT_MISS
) ] = 0x01b7,
660 static __initconst
const u64 core2_hw_cache_event_ids
661 [PERF_COUNT_HW_CACHE_MAX
]
662 [PERF_COUNT_HW_CACHE_OP_MAX
]
663 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
667 [ C(RESULT_ACCESS
) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
668 [ C(RESULT_MISS
) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
671 [ C(RESULT_ACCESS
) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
672 [ C(RESULT_MISS
) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
674 [ C(OP_PREFETCH
) ] = {
675 [ C(RESULT_ACCESS
) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
676 [ C(RESULT_MISS
) ] = 0,
681 [ C(RESULT_ACCESS
) ] = 0x0080, /* L1I.READS */
682 [ C(RESULT_MISS
) ] = 0x0081, /* L1I.MISSES */
685 [ C(RESULT_ACCESS
) ] = -1,
686 [ C(RESULT_MISS
) ] = -1,
688 [ C(OP_PREFETCH
) ] = {
689 [ C(RESULT_ACCESS
) ] = 0,
690 [ C(RESULT_MISS
) ] = 0,
695 [ C(RESULT_ACCESS
) ] = 0x4f29, /* L2_LD.MESI */
696 [ C(RESULT_MISS
) ] = 0x4129, /* L2_LD.ISTATE */
699 [ C(RESULT_ACCESS
) ] = 0x4f2A, /* L2_ST.MESI */
700 [ C(RESULT_MISS
) ] = 0x412A, /* L2_ST.ISTATE */
702 [ C(OP_PREFETCH
) ] = {
703 [ C(RESULT_ACCESS
) ] = 0,
704 [ C(RESULT_MISS
) ] = 0,
709 [ C(RESULT_ACCESS
) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
710 [ C(RESULT_MISS
) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
713 [ C(RESULT_ACCESS
) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
714 [ C(RESULT_MISS
) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
716 [ C(OP_PREFETCH
) ] = {
717 [ C(RESULT_ACCESS
) ] = 0,
718 [ C(RESULT_MISS
) ] = 0,
723 [ C(RESULT_ACCESS
) ] = 0x00c0, /* INST_RETIRED.ANY_P */
724 [ C(RESULT_MISS
) ] = 0x1282, /* ITLBMISSES */
727 [ C(RESULT_ACCESS
) ] = -1,
728 [ C(RESULT_MISS
) ] = -1,
730 [ C(OP_PREFETCH
) ] = {
731 [ C(RESULT_ACCESS
) ] = -1,
732 [ C(RESULT_MISS
) ] = -1,
737 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
738 [ C(RESULT_MISS
) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
741 [ C(RESULT_ACCESS
) ] = -1,
742 [ C(RESULT_MISS
) ] = -1,
744 [ C(OP_PREFETCH
) ] = {
745 [ C(RESULT_ACCESS
) ] = -1,
746 [ C(RESULT_MISS
) ] = -1,
751 static __initconst
const u64 atom_hw_cache_event_ids
752 [PERF_COUNT_HW_CACHE_MAX
]
753 [PERF_COUNT_HW_CACHE_OP_MAX
]
754 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
758 [ C(RESULT_ACCESS
) ] = 0x2140, /* L1D_CACHE.LD */
759 [ C(RESULT_MISS
) ] = 0,
762 [ C(RESULT_ACCESS
) ] = 0x2240, /* L1D_CACHE.ST */
763 [ C(RESULT_MISS
) ] = 0,
765 [ C(OP_PREFETCH
) ] = {
766 [ C(RESULT_ACCESS
) ] = 0x0,
767 [ C(RESULT_MISS
) ] = 0,
772 [ C(RESULT_ACCESS
) ] = 0x0380, /* L1I.READS */
773 [ C(RESULT_MISS
) ] = 0x0280, /* L1I.MISSES */
776 [ C(RESULT_ACCESS
) ] = -1,
777 [ C(RESULT_MISS
) ] = -1,
779 [ C(OP_PREFETCH
) ] = {
780 [ C(RESULT_ACCESS
) ] = 0,
781 [ C(RESULT_MISS
) ] = 0,
786 [ C(RESULT_ACCESS
) ] = 0x4f29, /* L2_LD.MESI */
787 [ C(RESULT_MISS
) ] = 0x4129, /* L2_LD.ISTATE */
790 [ C(RESULT_ACCESS
) ] = 0x4f2A, /* L2_ST.MESI */
791 [ C(RESULT_MISS
) ] = 0x412A, /* L2_ST.ISTATE */
793 [ C(OP_PREFETCH
) ] = {
794 [ C(RESULT_ACCESS
) ] = 0,
795 [ C(RESULT_MISS
) ] = 0,
800 [ C(RESULT_ACCESS
) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
801 [ C(RESULT_MISS
) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
804 [ C(RESULT_ACCESS
) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
805 [ C(RESULT_MISS
) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
807 [ C(OP_PREFETCH
) ] = {
808 [ C(RESULT_ACCESS
) ] = 0,
809 [ C(RESULT_MISS
) ] = 0,
814 [ C(RESULT_ACCESS
) ] = 0x00c0, /* INST_RETIRED.ANY_P */
815 [ C(RESULT_MISS
) ] = 0x0282, /* ITLB.MISSES */
818 [ C(RESULT_ACCESS
) ] = -1,
819 [ C(RESULT_MISS
) ] = -1,
821 [ C(OP_PREFETCH
) ] = {
822 [ C(RESULT_ACCESS
) ] = -1,
823 [ C(RESULT_MISS
) ] = -1,
828 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
829 [ C(RESULT_MISS
) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
832 [ C(RESULT_ACCESS
) ] = -1,
833 [ C(RESULT_MISS
) ] = -1,
835 [ C(OP_PREFETCH
) ] = {
836 [ C(RESULT_ACCESS
) ] = -1,
837 [ C(RESULT_MISS
) ] = -1,
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
868 static void intel_pmu_enable_all(int added
)
870 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
872 intel_pmu_pebs_enable_all();
873 intel_pmu_lbr_enable_all();
874 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL
,
875 x86_pmu
.intel_ctrl
& ~cpuc
->intel_ctrl_guest_mask
);
877 if (test_bit(INTEL_PMC_IDX_FIXED_BTS
, cpuc
->active_mask
)) {
878 struct perf_event
*event
=
879 cpuc
->events
[INTEL_PMC_IDX_FIXED_BTS
];
881 if (WARN_ON_ONCE(!event
))
884 intel_pmu_enable_bts(event
->hw
.config
);
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
902 static void intel_pmu_nhm_workaround(void)
904 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
905 static const unsigned long nhm_magic
[4] = {
911 struct perf_event
*event
;
	/*
	 * The Errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we take are a little different from the above:
	 * A) To reduce MSR operations, we don't run step 1) as the MSRs
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic number;
	 * C) With step 5), we only clear when the PERFEVTSELx is
	 *    not currently used;
	 * D) Call x86_perf_event_set_period to restore PMCx.
	 */
935 /* We always operate 4 pairs of PERF Counters */
936 for (i
= 0; i
< 4; i
++) {
937 event
= cpuc
->events
[i
];
939 x86_perf_event_update(event
);
942 for (i
= 0; i
< 4; i
++) {
943 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0
+ i
, nhm_magic
[i
]);
944 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0
+ i
, 0x0);
947 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL
, 0xf);
948 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL
, 0x0);
950 for (i
= 0; i
< 4; i
++) {
951 event
= cpuc
->events
[i
];
954 x86_perf_event_set_period(event
);
955 __x86_pmu_enable_event(&event
->hw
,
956 ARCH_PERFMON_EVENTSEL_ENABLE
);
958 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0
+ i
, 0x0);
962 static void intel_pmu_nhm_enable_all(int added
)
965 intel_pmu_nhm_workaround();
966 intel_pmu_enable_all(added
);
969 static inline u64
intel_pmu_get_status(void)
973 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS
, status
);
978 static inline void intel_pmu_ack_status(u64 ack
)
980 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL
, ack
);
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
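/*
 * Illustrative note (editorial, not in the original source):
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control field per fixed
 * counter, which is why the fixed-counter helpers mask with 0xf << (idx * 4).
 * Within each field, bit 0 enables ring-0 counting, bit 1 enables ring-3
 * counting, bit 2 is the ANY-thread qualifier (perfmon v3 and up) and bit 3
 * requests PMI generation on overflow, matching the 0x1/0x2/0x4/0x8 bits
 * assembled in intel_pmu_enable_fixed() below.
 */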
995 static void intel_pmu_disable_event(struct perf_event
*event
)
997 struct hw_perf_event
*hwc
= &event
->hw
;
998 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1000 if (unlikely(hwc
->idx
== INTEL_PMC_IDX_FIXED_BTS
)) {
1001 intel_pmu_disable_bts();
1002 intel_pmu_drain_bts_buffer();
1006 cpuc
->intel_ctrl_guest_mask
&= ~(1ull << hwc
->idx
);
1007 cpuc
->intel_ctrl_host_mask
&= ~(1ull << hwc
->idx
);
1010 * must disable before any actual event
1011 * because any event may be combined with LBR
1013 if (intel_pmu_needs_lbr_smpl(event
))
1014 intel_pmu_lbr_disable(event
);
1016 if (unlikely(hwc
->config_base
== MSR_ARCH_PERFMON_FIXED_CTR_CTRL
)) {
1017 intel_pmu_disable_fixed(hwc
);
1021 x86_pmu_disable_event(event
);
1023 if (unlikely(event
->attr
.precise_ip
))
1024 intel_pmu_pebs_disable(event
);
1027 static void intel_pmu_enable_fixed(struct hw_perf_event
*hwc
)
1029 int idx
= hwc
->idx
- INTEL_PMC_IDX_FIXED
;
1030 u64 ctrl_val
, bits
, mask
;
1033 * Enable IRQ generation (0x8),
1034 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1038 if (hwc
->config
& ARCH_PERFMON_EVENTSEL_USR
)
1040 if (hwc
->config
& ARCH_PERFMON_EVENTSEL_OS
)
1044 * ANY bit is supported in v3 and up
1046 if (x86_pmu
.version
> 2 && hwc
->config
& ARCH_PERFMON_EVENTSEL_ANY
)
1050 mask
= 0xfULL
<< (idx
* 4);
1052 rdmsrl(hwc
->config_base
, ctrl_val
);
1055 wrmsrl(hwc
->config_base
, ctrl_val
);
1058 static void intel_pmu_enable_event(struct perf_event
*event
)
1060 struct hw_perf_event
*hwc
= &event
->hw
;
1061 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1063 if (unlikely(hwc
->idx
== INTEL_PMC_IDX_FIXED_BTS
)) {
1064 if (!__this_cpu_read(cpu_hw_events
.enabled
))
1067 intel_pmu_enable_bts(hwc
->config
);
	/*
	 * must be enabled before any actual event
	 * because any event may be combined with LBR
	 */
1074 if (intel_pmu_needs_lbr_smpl(event
))
1075 intel_pmu_lbr_enable(event
);
1077 if (event
->attr
.exclude_host
)
1078 cpuc
->intel_ctrl_guest_mask
|= (1ull << hwc
->idx
);
1079 if (event
->attr
.exclude_guest
)
1080 cpuc
->intel_ctrl_host_mask
|= (1ull << hwc
->idx
);
1082 if (unlikely(hwc
->config_base
== MSR_ARCH_PERFMON_FIXED_CTR_CTRL
)) {
1083 intel_pmu_enable_fixed(hwc
);
1087 if (unlikely(event
->attr
.precise_ip
))
1088 intel_pmu_pebs_enable(event
);
1090 __x86_pmu_enable_event(hwc
, ARCH_PERFMON_EVENTSEL_ENABLE
);
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
1103 static void intel_pmu_reset(void)
1105 struct debug_store
*ds
= __this_cpu_read(cpu_hw_events
.ds
);
1106 unsigned long flags
;
1109 if (!x86_pmu
.num_counters
)
1112 local_irq_save(flags
);
1114 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1116 for (idx
= 0; idx
< x86_pmu
.num_counters
; idx
++) {
1117 wrmsrl_safe(x86_pmu_config_addr(idx
), 0ull);
1118 wrmsrl_safe(x86_pmu_event_addr(idx
), 0ull);
1120 for (idx
= 0; idx
< x86_pmu
.num_counters_fixed
; idx
++)
1121 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0
+ idx
, 0ull);
1124 ds
->bts_index
= ds
->bts_buffer_base
;
1126 local_irq_restore(flags
);
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
1133 static int intel_pmu_handle_irq(struct pt_regs
*regs
)
1135 struct perf_sample_data data
;
1136 struct cpu_hw_events
*cpuc
;
1141 cpuc
= &__get_cpu_var(cpu_hw_events
);
1144 * Some chipsets need to unmask the LVTPC in a particular spot
1145 * inside the nmi handler. As a result, the unmasking was pushed
1146 * into all the nmi handlers.
1148 * This handler doesn't seem to have any issues with the unmasking
1149 * so it was left at the top.
1151 apic_write(APIC_LVTPC
, APIC_DM_NMI
);
1153 intel_pmu_disable_all();
1154 handled
= intel_pmu_drain_bts_buffer();
1155 status
= intel_pmu_get_status();
1157 intel_pmu_enable_all(0);
1163 intel_pmu_ack_status(status
);
1164 if (++loops
> 100) {
1165 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1166 perf_event_print_debug();
1171 inc_irq_stat(apic_perf_irqs
);
1173 intel_pmu_lbr_read();
1176 * PEBS overflow sets bit 62 in the global status register
1178 if (__test_and_clear_bit(62, (unsigned long *)&status
)) {
1180 x86_pmu
.drain_pebs(regs
);
1183 for_each_set_bit(bit
, (unsigned long *)&status
, X86_PMC_IDX_MAX
) {
1184 struct perf_event
*event
= cpuc
->events
[bit
];
1188 if (!test_bit(bit
, cpuc
->active_mask
))
1191 if (!intel_pmu_save_and_restart(event
))
1194 perf_sample_data_init(&data
, 0, event
->hw
.last_period
);
1196 if (has_branch_stack(event
))
1197 data
.br_stack
= &cpuc
->lbr_stack
;
1199 if (perf_event_overflow(event
, &data
, regs
))
1200 x86_pmu_stop(event
, 0);
1204 * Repeat if there is more work to be done:
1206 status
= intel_pmu_get_status();
1211 intel_pmu_enable_all(0);
1215 static struct event_constraint
*
1216 intel_bts_constraints(struct perf_event
*event
)
1218 struct hw_perf_event
*hwc
= &event
->hw
;
1219 unsigned int hw_event
, bts_event
;
1221 if (event
->attr
.freq
)
1224 hw_event
= hwc
->config
& INTEL_ARCH_EVENT_MASK
;
1225 bts_event
= x86_pmu
.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS
);
1227 if (unlikely(hw_event
== bts_event
&& hwc
->sample_period
== 1))
1228 return &bts_constraint
;
static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}
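/*
 * Illustrative note (editorial, not in the original source): on parts with
 * two OFFCORE_RESPONSE MSRs (ERF_HAS_RSP_1), an offcore event that cannot get
 * its preferred extra register can be retried on the sibling one. When that
 * happens, intel_fixup_er() below rewrites the event code between 0x01b7
 * (MSR_OFFCORE_RSP_0) and 0x01bb (MSR_OFFCORE_RSP_1) so the programmed event
 * matches the register that was actually allocated.
 */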
1247 static void intel_fixup_er(struct perf_event
*event
, int idx
)
1249 event
->hw
.extra_reg
.idx
= idx
;
1251 if (idx
== EXTRA_REG_RSP_0
) {
1252 event
->hw
.config
&= ~INTEL_ARCH_EVENT_MASK
;
1253 event
->hw
.config
|= 0x01b7;
1254 event
->hw
.extra_reg
.reg
= MSR_OFFCORE_RSP_0
;
1255 } else if (idx
== EXTRA_REG_RSP_1
) {
1256 event
->hw
.config
&= ~INTEL_ARCH_EVENT_MASK
;
1257 event
->hw
.config
|= 0x01bb;
1258 event
->hw
.extra_reg
.reg
= MSR_OFFCORE_RSP_1
;
/*
 * manage allocation of the shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
1269 static struct event_constraint
*
1270 __intel_shared_reg_get_constraints(struct cpu_hw_events
*cpuc
,
1271 struct perf_event
*event
,
1272 struct hw_perf_event_extra
*reg
)
1274 struct event_constraint
*c
= &emptyconstraint
;
1275 struct er_account
*era
;
1276 unsigned long flags
;
1280 * reg->alloc can be set due to existing state, so for fake cpuc we
1281 * need to ignore this, otherwise we might fail to allocate proper fake
1282 * state for this extra reg constraint. Also see the comment below.
1284 if (reg
->alloc
&& !cpuc
->is_fake
)
1285 return NULL
; /* call x86_get_event_constraint() */
1288 era
= &cpuc
->shared_regs
->regs
[idx
];
1290 * we use spin_lock_irqsave() to avoid lockdep issues when
1291 * passing a fake cpuc
1293 raw_spin_lock_irqsave(&era
->lock
, flags
);
1295 if (!atomic_read(&era
->ref
) || era
->config
== reg
->config
) {
1298 * If its a fake cpuc -- as per validate_{group,event}() we
1299 * shouldn't touch event state and we can avoid doing so
1300 * since both will only call get_event_constraints() once
1301 * on each event, this avoids the need for reg->alloc.
1303 * Not doing the ER fixup will only result in era->reg being
1304 * wrong, but since we won't actually try and program hardware
1305 * this isn't a problem either.
1307 if (!cpuc
->is_fake
) {
1308 if (idx
!= reg
->idx
)
1309 intel_fixup_er(event
, idx
);
1312 * x86_schedule_events() can call get_event_constraints()
1313 * multiple times on events in the case of incremental
1314 * scheduling(). reg->alloc ensures we only do the ER
1320 /* lock in msr value */
1321 era
->config
= reg
->config
;
1322 era
->reg
= reg
->reg
;
1325 atomic_inc(&era
->ref
);
1328 * need to call x86_get_event_constraint()
1329 * to check if associated event has constraints
1333 idx
= intel_alt_er(idx
);
1334 if (idx
!= reg
->idx
) {
1335 raw_spin_unlock_irqrestore(&era
->lock
, flags
);
1339 raw_spin_unlock_irqrestore(&era
->lock
, flags
);
1345 __intel_shared_reg_put_constraints(struct cpu_hw_events
*cpuc
,
1346 struct hw_perf_event_extra
*reg
)
1348 struct er_account
*era
;
	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * Also takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc
	 * state either, since it'll be thrown out.
	 */
1358 if (!reg
->alloc
|| cpuc
->is_fake
)
1361 era
= &cpuc
->shared_regs
->regs
[reg
->idx
];
1363 /* one fewer user */
1364 atomic_dec(&era
->ref
);
1366 /* allocate again next time */
1370 static struct event_constraint
*
1371 intel_shared_regs_constraints(struct cpu_hw_events
*cpuc
,
1372 struct perf_event
*event
)
1374 struct event_constraint
*c
= NULL
, *d
;
1375 struct hw_perf_event_extra
*xreg
, *breg
;
1377 xreg
= &event
->hw
.extra_reg
;
1378 if (xreg
->idx
!= EXTRA_REG_NONE
) {
1379 c
= __intel_shared_reg_get_constraints(cpuc
, event
, xreg
);
1380 if (c
== &emptyconstraint
)
1383 breg
= &event
->hw
.branch_reg
;
1384 if (breg
->idx
!= EXTRA_REG_NONE
) {
1385 d
= __intel_shared_reg_get_constraints(cpuc
, event
, breg
);
1386 if (d
== &emptyconstraint
) {
1387 __intel_shared_reg_put_constraints(cpuc
, xreg
);
1394 struct event_constraint
*
1395 x86_get_event_constraints(struct cpu_hw_events
*cpuc
, struct perf_event
*event
)
1397 struct event_constraint
*c
;
1399 if (x86_pmu
.event_constraints
) {
1400 for_each_event_constraint(c
, x86_pmu
.event_constraints
) {
1401 if ((event
->hw
.config
& c
->cmask
) == c
->code
)
1406 return &unconstrained
;
1409 static struct event_constraint
*
1410 intel_get_event_constraints(struct cpu_hw_events
*cpuc
, struct perf_event
*event
)
1412 struct event_constraint
*c
;
1414 c
= intel_bts_constraints(event
);
1418 c
= intel_pebs_constraints(event
);
1422 c
= intel_shared_regs_constraints(cpuc
, event
);
1426 return x86_get_event_constraints(cpuc
, event
);
1430 intel_put_shared_regs_event_constraints(struct cpu_hw_events
*cpuc
,
1431 struct perf_event
*event
)
1433 struct hw_perf_event_extra
*reg
;
1435 reg
= &event
->hw
.extra_reg
;
1436 if (reg
->idx
!= EXTRA_REG_NONE
)
1437 __intel_shared_reg_put_constraints(cpuc
, reg
);
1439 reg
= &event
->hw
.branch_reg
;
1440 if (reg
->idx
!= EXTRA_REG_NONE
)
1441 __intel_shared_reg_put_constraints(cpuc
, reg
);
1444 static void intel_put_event_constraints(struct cpu_hw_events
*cpuc
,
1445 struct perf_event
*event
)
1447 intel_put_shared_regs_event_constraints(cpuc
, event
);
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer instructions,
		 * which is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
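/*
 * Worked example (editorial, not in the original source): with the eventsel
 * layout used throughout this file (event select in bits 0-7, umask in bits
 * 8-15, INV in bit 23, CMASK in bits 24-31), the alias above programs
 * roughly 0x108000c0 - event 0xc0, cmask 16, inverted - i.e. "cycles that
 * retire fewer than 16 instructions", which on these cores is every cycle.
 * The SNB variant below does the same thing with UOPS_RETIRED.ALL (0x01c2).
 */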
1478 static void intel_pebs_aliases_snb(struct perf_event
*event
)
1480 if ((event
->hw
.config
& X86_RAW_EVENT_MASK
) == 0x003c) {
1482 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1483 * (0x003c) so that we can use it with PEBS.
1485 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1486 * PEBS capable. However we can use UOPS_RETIRED.ALL
1487 * (0x01c2), which is a PEBS capable event, to get the same
1490 * UOPS_RETIRED.ALL counts the number of cycles that retires
1491 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1492 * larger than the maximum number of micro-ops that can be
1493 * retired per cycle (4) and then inverting the condition, we
1494 * count all cycles that retire 16 or less micro-ops, which
1497 * Thereby we gain a PEBS capable cycle counter.
1499 u64 alt_config
= X86_CONFIG(.event
=0xc2, .umask
=0x01, .inv
=1, .cmask
=16);
1501 alt_config
|= (event
->hw
.config
& ~X86_RAW_EVENT_MASK
);
1502 event
->hw
.config
= alt_config
;
1506 static int intel_pmu_hw_config(struct perf_event
*event
)
1508 int ret
= x86_pmu_hw_config(event
);
1513 if (event
->attr
.precise_ip
&& x86_pmu
.pebs_aliases
)
1514 x86_pmu
.pebs_aliases(event
);
1516 if (intel_pmu_needs_lbr_smpl(event
)) {
1517 ret
= intel_pmu_setup_lbr_filter(event
);
1522 if (event
->attr
.type
!= PERF_TYPE_RAW
)
1525 if (!(event
->attr
.config
& ARCH_PERFMON_EVENTSEL_ANY
))
1528 if (x86_pmu
.version
< 3)
1531 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN
))
1534 event
->hw
.config
|= ARCH_PERFMON_EVENTSEL_ANY
;
1539 struct perf_guest_switch_msr
*perf_guest_get_msrs(int *nr
)
1541 if (x86_pmu
.guest_get_msrs
)
1542 return x86_pmu
.guest_get_msrs(nr
);
1546 EXPORT_SYMBOL_GPL(perf_guest_get_msrs
);
1548 static struct perf_guest_switch_msr
*intel_guest_get_msrs(int *nr
)
1550 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1551 struct perf_guest_switch_msr
*arr
= cpuc
->guest_switch_msrs
;
1553 arr
[0].msr
= MSR_CORE_PERF_GLOBAL_CTRL
;
1554 arr
[0].host
= x86_pmu
.intel_ctrl
& ~cpuc
->intel_ctrl_guest_mask
;
1555 arr
[0].guest
= x86_pmu
.intel_ctrl
& ~cpuc
->intel_ctrl_host_mask
;
1557 * If PMU counter has PEBS enabled it is not enough to disable counter
1558 * on a guest entry since PEBS memory write can overshoot guest entry
1559 * and corrupt guest memory. Disabling PEBS solves the problem.
1561 arr
[1].msr
= MSR_IA32_PEBS_ENABLE
;
1562 arr
[1].host
= cpuc
->pebs_enabled
;
1569 static struct perf_guest_switch_msr
*core_guest_get_msrs(int *nr
)
1571 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1572 struct perf_guest_switch_msr
*arr
= cpuc
->guest_switch_msrs
;
1575 for (idx
= 0; idx
< x86_pmu
.num_counters
; idx
++) {
1576 struct perf_event
*event
= cpuc
->events
[idx
];
1578 arr
[idx
].msr
= x86_pmu_config_addr(idx
);
1579 arr
[idx
].host
= arr
[idx
].guest
= 0;
1581 if (!test_bit(idx
, cpuc
->active_mask
))
1584 arr
[idx
].host
= arr
[idx
].guest
=
1585 event
->hw
.config
| ARCH_PERFMON_EVENTSEL_ENABLE
;
1587 if (event
->attr
.exclude_host
)
1588 arr
[idx
].host
&= ~ARCH_PERFMON_EVENTSEL_ENABLE
;
1589 else if (event
->attr
.exclude_guest
)
1590 arr
[idx
].guest
&= ~ARCH_PERFMON_EVENTSEL_ENABLE
;
1593 *nr
= x86_pmu
.num_counters
;
1597 static void core_pmu_enable_event(struct perf_event
*event
)
1599 if (!event
->attr
.exclude_host
)
1600 x86_pmu_enable_event(event
);
1603 static void core_pmu_enable_all(int added
)
1605 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1608 for (idx
= 0; idx
< x86_pmu
.num_counters
; idx
++) {
1609 struct hw_perf_event
*hwc
= &cpuc
->events
[idx
]->hw
;
1611 if (!test_bit(idx
, cpuc
->active_mask
) ||
1612 cpuc
->events
[idx
]->attr
.exclude_host
)
1615 __x86_pmu_enable_event(hwc
, ARCH_PERFMON_EVENTSEL_ENABLE
);
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
1627 static struct attribute
*intel_arch_formats_attr
[] = {
1628 &format_attr_event
.attr
,
1629 &format_attr_umask
.attr
,
1630 &format_attr_edge
.attr
,
1631 &format_attr_pc
.attr
,
1632 &format_attr_inv
.attr
,
1633 &format_attr_cmask
.attr
,
1637 ssize_t
intel_event_sysfs_show(char *page
, u64 config
)
1639 u64 event
= (config
& ARCH_PERFMON_EVENTSEL_EVENT
);
1641 return x86_event_sysfs_show(page
, config
, event
);
1644 static __initconst
const struct x86_pmu core_pmu
= {
1646 .handle_irq
= x86_pmu_handle_irq
,
1647 .disable_all
= x86_pmu_disable_all
,
1648 .enable_all
= core_pmu_enable_all
,
1649 .enable
= core_pmu_enable_event
,
1650 .disable
= x86_pmu_disable_event
,
1651 .hw_config
= x86_pmu_hw_config
,
1652 .schedule_events
= x86_schedule_events
,
1653 .eventsel
= MSR_ARCH_PERFMON_EVENTSEL0
,
1654 .perfctr
= MSR_ARCH_PERFMON_PERFCTR0
,
1655 .event_map
= intel_pmu_event_map
,
1656 .max_events
= ARRAY_SIZE(intel_perfmon_event_map
),
1659 * Intel PMCs cannot be accessed sanely above 32 bit width,
1660 * so we install an artificial 1<<31 period regardless of
1661 * the generic event period:
1663 .max_period
= (1ULL << 31) - 1,
1664 .get_event_constraints
= intel_get_event_constraints
,
1665 .put_event_constraints
= intel_put_event_constraints
,
1666 .event_constraints
= intel_core_event_constraints
,
1667 .guest_get_msrs
= core_guest_get_msrs
,
1668 .format_attrs
= intel_arch_formats_attr
,
1669 .events_sysfs_show
= intel_event_sysfs_show
,
1672 struct intel_shared_regs
*allocate_shared_regs(int cpu
)
1674 struct intel_shared_regs
*regs
;
1677 regs
= kzalloc_node(sizeof(struct intel_shared_regs
),
1678 GFP_KERNEL
, cpu_to_node(cpu
));
1681 * initialize the locks to keep lockdep happy
1683 for (i
= 0; i
< EXTRA_REG_MAX
; i
++)
1684 raw_spin_lock_init(®s
->regs
[i
].lock
);
1691 static int intel_pmu_cpu_prepare(int cpu
)
1693 struct cpu_hw_events
*cpuc
= &per_cpu(cpu_hw_events
, cpu
);
1695 if (!(x86_pmu
.extra_regs
|| x86_pmu
.lbr_sel_map
))
1698 cpuc
->shared_regs
= allocate_shared_regs(cpu
);
1699 if (!cpuc
->shared_regs
)
1705 static void intel_pmu_cpu_starting(int cpu
)
1707 struct cpu_hw_events
*cpuc
= &per_cpu(cpu_hw_events
, cpu
);
1708 int core_id
= topology_core_id(cpu
);
1711 init_debug_store_on_cpu(cpu
);
1713 * Deal with CPUs that don't clear their LBRs on power-up.
1715 intel_pmu_lbr_reset();
1717 cpuc
->lbr_sel
= NULL
;
1719 if (!cpuc
->shared_regs
)
1722 if (!(x86_pmu
.er_flags
& ERF_NO_HT_SHARING
)) {
1723 for_each_cpu(i
, topology_thread_cpumask(cpu
)) {
1724 struct intel_shared_regs
*pc
;
1726 pc
= per_cpu(cpu_hw_events
, i
).shared_regs
;
1727 if (pc
&& pc
->core_id
== core_id
) {
1728 cpuc
->kfree_on_online
= cpuc
->shared_regs
;
1729 cpuc
->shared_regs
= pc
;
1733 cpuc
->shared_regs
->core_id
= core_id
;
1734 cpuc
->shared_regs
->refcnt
++;
1737 if (x86_pmu
.lbr_sel_map
)
1738 cpuc
->lbr_sel
= &cpuc
->shared_regs
->regs
[EXTRA_REG_LBR
];
1741 static void intel_pmu_cpu_dying(int cpu
)
1743 struct cpu_hw_events
*cpuc
= &per_cpu(cpu_hw_events
, cpu
);
1744 struct intel_shared_regs
*pc
;
1746 pc
= cpuc
->shared_regs
;
1748 if (pc
->core_id
== -1 || --pc
->refcnt
== 0)
1750 cpuc
->shared_regs
= NULL
;
1753 fini_debug_store_on_cpu(cpu
);
static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, so we need to
	 * flush it on context switch.
	 * For now, we simply reset it.
	 */
	intel_pmu_lbr_reset();
}
1768 PMU_FORMAT_ATTR(offcore_rsp
, "config1:0-63");
1770 static struct attribute
*intel_arch3_formats_attr
[] = {
1771 &format_attr_event
.attr
,
1772 &format_attr_umask
.attr
,
1773 &format_attr_edge
.attr
,
1774 &format_attr_pc
.attr
,
1775 &format_attr_any
.attr
,
1776 &format_attr_inv
.attr
,
1777 &format_attr_cmask
.attr
,
1779 &format_attr_offcore_rsp
.attr
, /* XXX do NHM/WSM + SNB breakout */
1783 static __initconst
const struct x86_pmu intel_pmu
= {
1785 .handle_irq
= intel_pmu_handle_irq
,
1786 .disable_all
= intel_pmu_disable_all
,
1787 .enable_all
= intel_pmu_enable_all
,
1788 .enable
= intel_pmu_enable_event
,
1789 .disable
= intel_pmu_disable_event
,
1790 .hw_config
= intel_pmu_hw_config
,
1791 .schedule_events
= x86_schedule_events
,
1792 .eventsel
= MSR_ARCH_PERFMON_EVENTSEL0
,
1793 .perfctr
= MSR_ARCH_PERFMON_PERFCTR0
,
1794 .event_map
= intel_pmu_event_map
,
1795 .max_events
= ARRAY_SIZE(intel_perfmon_event_map
),
1798 * Intel PMCs cannot be accessed sanely above 32 bit width,
1799 * so we install an artificial 1<<31 period regardless of
1800 * the generic event period:
1802 .max_period
= (1ULL << 31) - 1,
1803 .get_event_constraints
= intel_get_event_constraints
,
1804 .put_event_constraints
= intel_put_event_constraints
,
1805 .pebs_aliases
= intel_pebs_aliases_core2
,
1807 .format_attrs
= intel_arch3_formats_attr
,
1808 .events_sysfs_show
= intel_event_sysfs_show
,
1810 .cpu_prepare
= intel_pmu_cpu_prepare
,
1811 .cpu_starting
= intel_pmu_cpu_starting
,
1812 .cpu_dying
= intel_pmu_cpu_dying
,
1813 .guest_get_msrs
= intel_guest_get_msrs
,
1814 .flush_branch_stack
= intel_pmu_flush_branch_stack
,
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs_constraints = NULL;
}
1843 static int intel_snb_pebs_broken(int cpu
)
1845 u32 rev
= UINT_MAX
; /* default to broken for unknown models */
1847 switch (cpu_data(cpu
).x86_model
) {
1852 case 45: /* SNB-EP */
1853 switch (cpu_data(cpu
).x86_mask
) {
1854 case 6: rev
= 0x618; break;
1855 case 7: rev
= 0x70c; break;
1859 return (cpu_data(cpu
).microcode
< rev
);
1862 static void intel_snb_check_microcode(void)
1864 int pebs_broken
= 0;
1868 for_each_online_cpu(cpu
) {
1869 if ((pebs_broken
= intel_snb_pebs_broken(cpu
)))
1874 if (pebs_broken
== x86_pmu
.pebs_broken
)
1878 * Serialized by the microcode lock..
1880 if (x86_pmu
.pebs_broken
) {
1881 pr_info("PEBS enabled due to microcode update\n");
1882 x86_pmu
.pebs_broken
= 0;
1884 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
1885 x86_pmu
.pebs_broken
= 1;
1889 static __init
void intel_sandybridge_quirk(void)
1891 x86_pmu
.check_microcode
= intel_snb_check_microcode
;
1892 intel_snb_check_microcode();
1895 static const struct { int id
; char *name
; } intel_arch_events_map
[] __initconst
= {
1896 { PERF_COUNT_HW_CPU_CYCLES
, "cpu cycles" },
1897 { PERF_COUNT_HW_INSTRUCTIONS
, "instructions" },
1898 { PERF_COUNT_HW_BUS_CYCLES
, "bus cycles" },
1899 { PERF_COUNT_HW_CACHE_REFERENCES
, "cache references" },
1900 { PERF_COUNT_HW_CACHE_MISSES
, "cache misses" },
1901 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS
, "branch instructions" },
1902 { PERF_COUNT_HW_BRANCH_MISSES
, "branch misses" },
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
1936 __init
int intel_pmu_init(void)
1938 union cpuid10_edx edx
;
1939 union cpuid10_eax eax
;
1940 union cpuid10_ebx ebx
;
1941 struct event_constraint
*c
;
1942 unsigned int unused
;
1945 if (!cpu_has(&boot_cpu_data
, X86_FEATURE_ARCH_PERFMON
)) {
1946 switch (boot_cpu_data
.x86
) {
1948 return p6_pmu_init();
1950 return knc_pmu_init();
1952 return p4_pmu_init();
1958 * Check whether the Architectural PerfMon supports
1959 * Branch Misses Retired hw_event or not.
1961 cpuid(10, &eax
.full
, &ebx
.full
, &unused
, &edx
.full
);
1962 if (eax
.split
.mask_length
< ARCH_PERFMON_EVENTS_COUNT
)
1965 version
= eax
.split
.version_id
;
1969 x86_pmu
= intel_pmu
;
1971 x86_pmu
.version
= version
;
1972 x86_pmu
.num_counters
= eax
.split
.num_counters
;
1973 x86_pmu
.cntval_bits
= eax
.split
.bit_width
;
1974 x86_pmu
.cntval_mask
= (1ULL << eax
.split
.bit_width
) - 1;
1976 x86_pmu
.events_maskl
= ebx
.full
;
1977 x86_pmu
.events_mask_len
= eax
.split
.mask_length
;
1979 x86_pmu
.max_pebs_events
= min_t(unsigned, MAX_PEBS_EVENTS
, x86_pmu
.num_counters
);
1982 * Quirk: v2 perfmon does not report fixed-purpose events, so
1983 * assume at least 3 events:
1986 x86_pmu
.num_counters_fixed
= max((int)edx
.split
.num_counters_fixed
, 3);
1989 * v2 and above have a perf capabilities MSR
1994 rdmsrl(MSR_IA32_PERF_CAPABILITIES
, capabilities
);
1995 x86_pmu
.intel_cap
.capabilities
= capabilities
;
2000 x86_add_quirk(intel_arch_events_quirk
); /* Install first, so it runs last */
2003 * Install the hw-cache-events table:
2005 switch (boot_cpu_data
.x86_model
) {
2006 case 14: /* 65 nm core solo/duo, "Yonah" */
2007 pr_cont("Core events, ");
2010 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2011 x86_add_quirk(intel_clovertown_quirk
);
2012 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2013 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2014 case 29: /* six-core 45 nm xeon "Dunnington" */
2015 memcpy(hw_cache_event_ids
, core2_hw_cache_event_ids
,
2016 sizeof(hw_cache_event_ids
));
2018 intel_pmu_lbr_init_core();
2020 x86_pmu
.event_constraints
= intel_core2_event_constraints
;
2021 x86_pmu
.pebs_constraints
= intel_core2_pebs_event_constraints
;
2022 pr_cont("Core2 events, ");
2025 case 26: /* 45 nm nehalem, "Bloomfield" */
2026 case 30: /* 45 nm nehalem, "Lynnfield" */
2027 case 46: /* 45 nm nehalem-ex, "Beckton" */
2028 memcpy(hw_cache_event_ids
, nehalem_hw_cache_event_ids
,
2029 sizeof(hw_cache_event_ids
));
2030 memcpy(hw_cache_extra_regs
, nehalem_hw_cache_extra_regs
,
2031 sizeof(hw_cache_extra_regs
));
2033 intel_pmu_lbr_init_nhm();
2035 x86_pmu
.event_constraints
= intel_nehalem_event_constraints
;
2036 x86_pmu
.pebs_constraints
= intel_nehalem_pebs_event_constraints
;
2037 x86_pmu
.enable_all
= intel_pmu_nhm_enable_all
;
2038 x86_pmu
.extra_regs
= intel_nehalem_extra_regs
;
2040 /* UOPS_ISSUED.STALLED_CYCLES */
2041 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] =
2042 X86_CONFIG(.event
=0x0e, .umask
=0x01, .inv
=1, .cmask
=1);
2043 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2044 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] =
2045 X86_CONFIG(.event
=0xb1, .umask
=0x3f, .inv
=1, .cmask
=1);
2047 x86_add_quirk(intel_nehalem_quirk
);
2049 pr_cont("Nehalem events, ");
2053 case 38: /* Lincroft */
2054 case 39: /* Penwell */
2055 case 53: /* Cloverview */
2056 case 54: /* Cedarview */
2057 memcpy(hw_cache_event_ids
, atom_hw_cache_event_ids
,
2058 sizeof(hw_cache_event_ids
));
2060 intel_pmu_lbr_init_atom();
2062 x86_pmu
.event_constraints
= intel_gen_event_constraints
;
2063 x86_pmu
.pebs_constraints
= intel_atom_pebs_event_constraints
;
2064 pr_cont("Atom events, ");
2067 case 37: /* 32 nm nehalem, "Clarkdale" */
2068 case 44: /* 32 nm nehalem, "Gulftown" */
2069 case 47: /* 32 nm Xeon E7 */
2070 memcpy(hw_cache_event_ids
, westmere_hw_cache_event_ids
,
2071 sizeof(hw_cache_event_ids
));
2072 memcpy(hw_cache_extra_regs
, nehalem_hw_cache_extra_regs
,
2073 sizeof(hw_cache_extra_regs
));
2075 intel_pmu_lbr_init_nhm();
2077 x86_pmu
.event_constraints
= intel_westmere_event_constraints
;
2078 x86_pmu
.enable_all
= intel_pmu_nhm_enable_all
;
2079 x86_pmu
.pebs_constraints
= intel_westmere_pebs_event_constraints
;
2080 x86_pmu
.extra_regs
= intel_westmere_extra_regs
;
2081 x86_pmu
.er_flags
|= ERF_HAS_RSP_1
;
2083 /* UOPS_ISSUED.STALLED_CYCLES */
2084 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] =
2085 X86_CONFIG(.event
=0x0e, .umask
=0x01, .inv
=1, .cmask
=1);
2086 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2087 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] =
2088 X86_CONFIG(.event
=0xb1, .umask
=0x3f, .inv
=1, .cmask
=1);
2090 pr_cont("Westmere events, ");
2093 case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
2095 x86_add_quirk(intel_sandybridge_quirk
);
2096 memcpy(hw_cache_event_ids
, snb_hw_cache_event_ids
,
2097 sizeof(hw_cache_event_ids
));
2098 memcpy(hw_cache_extra_regs
, snb_hw_cache_extra_regs
,
2099 sizeof(hw_cache_extra_regs
));
2101 intel_pmu_lbr_init_snb();
2103 x86_pmu
.event_constraints
= intel_snb_event_constraints
;
2104 x86_pmu
.pebs_constraints
= intel_snb_pebs_event_constraints
;
2105 x86_pmu
.pebs_aliases
= intel_pebs_aliases_snb
;
2106 if (boot_cpu_data
.x86_model
== 45)
2107 x86_pmu
.extra_regs
= intel_snbep_extra_regs
;
2109 x86_pmu
.extra_regs
= intel_snb_extra_regs
;
2110 /* all extra regs are per-cpu when HT is on */
2111 x86_pmu
.er_flags
|= ERF_HAS_RSP_1
;
2112 x86_pmu
.er_flags
|= ERF_NO_HT_SHARING
;
2114 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2115 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] =
2116 X86_CONFIG(.event
=0x0e, .umask
=0x01, .inv
=1, .cmask
=1);
2117 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
2118 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] =
2119 X86_CONFIG(.event
=0xb1, .umask
=0x01, .inv
=1, .cmask
=1);
2121 pr_cont("SandyBridge events, ");
2123 case 58: /* IvyBridge */
2124 case 62: /* IvyBridge EP */
2125 memcpy(hw_cache_event_ids
, snb_hw_cache_event_ids
,
2126 sizeof(hw_cache_event_ids
));
2127 memcpy(hw_cache_extra_regs
, snb_hw_cache_extra_regs
,
2128 sizeof(hw_cache_extra_regs
));
2130 intel_pmu_lbr_init_snb();
2132 x86_pmu
.event_constraints
= intel_ivb_event_constraints
;
2133 x86_pmu
.pebs_constraints
= intel_ivb_pebs_event_constraints
;
2134 x86_pmu
.pebs_aliases
= intel_pebs_aliases_snb
;
2135 if (boot_cpu_data
.x86_model
== 62)
2136 x86_pmu
.extra_regs
= intel_snbep_extra_regs
;
2138 x86_pmu
.extra_regs
= intel_snb_extra_regs
;
2139 /* all extra regs are per-cpu when HT is on */
2140 x86_pmu
.er_flags
|= ERF_HAS_RSP_1
;
2141 x86_pmu
.er_flags
|= ERF_NO_HT_SHARING
;
2143 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2144 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] =
2145 X86_CONFIG(.event
=0x0e, .umask
=0x01, .inv
=1, .cmask
=1);
2147 pr_cont("IvyBridge events, ");
2152 switch (x86_pmu
.version
) {
2154 x86_pmu
.event_constraints
= intel_v1_event_constraints
;
2155 pr_cont("generic architected perfmon v1, ");
2159 * default constraints for v2 and up
2161 x86_pmu
.event_constraints
= intel_gen_event_constraints
;
2162 pr_cont("generic architected perfmon, ");
2167 if (x86_pmu
.num_counters
> INTEL_PMC_MAX_GENERIC
) {
2168 WARN(1, KERN_ERR
"hw perf events %d > max(%d), clipping!",
2169 x86_pmu
.num_counters
, INTEL_PMC_MAX_GENERIC
);
2170 x86_pmu
.num_counters
= INTEL_PMC_MAX_GENERIC
;
2172 x86_pmu
.intel_ctrl
= (1 << x86_pmu
.num_counters
) - 1;
2174 if (x86_pmu
.num_counters_fixed
> INTEL_PMC_MAX_FIXED
) {
2175 WARN(1, KERN_ERR
"hw perf events fixed %d > max(%d), clipping!",
2176 x86_pmu
.num_counters_fixed
, INTEL_PMC_MAX_FIXED
);
2177 x86_pmu
.num_counters_fixed
= INTEL_PMC_MAX_FIXED
;
2180 x86_pmu
.intel_ctrl
|=
2181 ((1LL << x86_pmu
.num_counters_fixed
)-1) << INTEL_PMC_IDX_FIXED
;
2183 if (x86_pmu
.event_constraints
) {
2185 * event on fixed counter2 (REF_CYCLES) only works on this
2186 * counter, so do not extend mask to generic counters
2188 for_each_event_constraint(c
, x86_pmu
.event_constraints
) {
2189 if (c
->cmask
!= X86_RAW_EVENT_MASK
2190 || c
->idxmsk64
== INTEL_PMC_MSK_FIXED_REF_CYCLES
) {
2194 c
->idxmsk64
|= (1ULL << x86_pmu
.num_counters
) - 1;
2195 c
->weight
+= x86_pmu
.num_counters
;