 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	EVENT_EXTRA_END
};
EVENT_ATTR_STR(mem-loads,  mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,  mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");

struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
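/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the values in intel_perfmon_event_map[] pack the unit mask in bits
 * 8-15 and the event select in bits 0-7.  The helper name below
 * (demo_make_intel_event) is made up for illustration only.
 */
static inline u64 demo_make_intel_event(u8 event_select, u8 umask)
{
	/*
	 * e.g. demo_make_intel_event(0x2e, 0x41) == 0x412e,
	 * LONGEST_LAT_CACHE.MISS, the generic "cache misses" event above.
	 */
	return ((u64)umask << 8) | event_select;
}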
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
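/*
 * Editor's note -- illustrative only, not part of the original file:
 * the derived SNB_* masks are plain bitwise ORs of the raw
 * MSR_OFFCORE_RSP_x fields.  Request-type bits select *what* is
 * counted, response bits select *where* it was satisfied; the LL-cache
 * entries in snb_hw_cache_extra_regs below are built exactly this way.
 * The demo_* names are hypothetical.
 */
static const u64 demo_snb_llc_read_access = SNB_DMND_READ | SNB_L3_ACCESS;
static const u64 demo_snb_llc_read_miss   = SNB_DMND_READ | SNB_L3_MISS;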
236 static __initconst
const u64 snb_hw_cache_extra_regs
237 [PERF_COUNT_HW_CACHE_MAX
]
238 [PERF_COUNT_HW_CACHE_OP_MAX
]
239 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
243 [ C(RESULT_ACCESS
) ] = SNB_DMND_READ
|SNB_L3_ACCESS
,
244 [ C(RESULT_MISS
) ] = SNB_DMND_READ
|SNB_L3_MISS
,
247 [ C(RESULT_ACCESS
) ] = SNB_DMND_WRITE
|SNB_L3_ACCESS
,
248 [ C(RESULT_MISS
) ] = SNB_DMND_WRITE
|SNB_L3_MISS
,
250 [ C(OP_PREFETCH
) ] = {
251 [ C(RESULT_ACCESS
) ] = SNB_DMND_PREFETCH
|SNB_L3_ACCESS
,
252 [ C(RESULT_MISS
) ] = SNB_DMND_PREFETCH
|SNB_L3_MISS
,
257 [ C(RESULT_ACCESS
) ] = SNB_DMND_READ
|SNB_DRAM_ANY
,
258 [ C(RESULT_MISS
) ] = SNB_DMND_READ
|SNB_DRAM_REMOTE
,
261 [ C(RESULT_ACCESS
) ] = SNB_DMND_WRITE
|SNB_DRAM_ANY
,
262 [ C(RESULT_MISS
) ] = SNB_DMND_WRITE
|SNB_DRAM_REMOTE
,
264 [ C(OP_PREFETCH
) ] = {
265 [ C(RESULT_ACCESS
) ] = SNB_DMND_PREFETCH
|SNB_DRAM_ANY
,
266 [ C(RESULT_MISS
) ] = SNB_DMND_PREFETCH
|SNB_DRAM_REMOTE
,
271 static __initconst
const u64 snb_hw_cache_event_ids
272 [PERF_COUNT_HW_CACHE_MAX
]
273 [PERF_COUNT_HW_CACHE_OP_MAX
]
274 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
278 [ C(RESULT_ACCESS
) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
279 [ C(RESULT_MISS
) ] = 0x0151, /* L1D.REPLACEMENT */
282 [ C(RESULT_ACCESS
) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
283 [ C(RESULT_MISS
) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
285 [ C(OP_PREFETCH
) ] = {
286 [ C(RESULT_ACCESS
) ] = 0x0,
287 [ C(RESULT_MISS
) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
292 [ C(RESULT_ACCESS
) ] = 0x0,
293 [ C(RESULT_MISS
) ] = 0x0280, /* ICACHE.MISSES */
296 [ C(RESULT_ACCESS
) ] = -1,
297 [ C(RESULT_MISS
) ] = -1,
299 [ C(OP_PREFETCH
) ] = {
300 [ C(RESULT_ACCESS
) ] = 0x0,
301 [ C(RESULT_MISS
) ] = 0x0,
306 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
307 [ C(RESULT_ACCESS
) ] = 0x01b7,
308 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
309 [ C(RESULT_MISS
) ] = 0x01b7,
312 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
313 [ C(RESULT_ACCESS
) ] = 0x01b7,
314 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
315 [ C(RESULT_MISS
) ] = 0x01b7,
317 [ C(OP_PREFETCH
) ] = {
318 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
319 [ C(RESULT_ACCESS
) ] = 0x01b7,
320 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
321 [ C(RESULT_MISS
) ] = 0x01b7,
326 [ C(RESULT_ACCESS
) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
327 [ C(RESULT_MISS
) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
330 [ C(RESULT_ACCESS
) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
331 [ C(RESULT_MISS
) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
333 [ C(OP_PREFETCH
) ] = {
334 [ C(RESULT_ACCESS
) ] = 0x0,
335 [ C(RESULT_MISS
) ] = 0x0,
340 [ C(RESULT_ACCESS
) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
341 [ C(RESULT_MISS
) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
344 [ C(RESULT_ACCESS
) ] = -1,
345 [ C(RESULT_MISS
) ] = -1,
347 [ C(OP_PREFETCH
) ] = {
348 [ C(RESULT_ACCESS
) ] = -1,
349 [ C(RESULT_MISS
) ] = -1,
354 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
355 [ C(RESULT_MISS
) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
358 [ C(RESULT_ACCESS
) ] = -1,
359 [ C(RESULT_MISS
) ] = -1,
361 [ C(OP_PREFETCH
) ] = {
362 [ C(RESULT_ACCESS
) ] = -1,
363 [ C(RESULT_MISS
) ] = -1,
368 [ C(RESULT_ACCESS
) ] = 0x01b7,
369 [ C(RESULT_MISS
) ] = 0x01b7,
372 [ C(RESULT_ACCESS
) ] = 0x01b7,
373 [ C(RESULT_MISS
) ] = 0x01b7,
375 [ C(OP_PREFETCH
) ] = {
376 [ C(RESULT_ACCESS
) ] = 0x01b7,
377 [ C(RESULT_MISS
) ] = 0x01b7,
383 static __initconst
const u64 westmere_hw_cache_event_ids
384 [PERF_COUNT_HW_CACHE_MAX
]
385 [PERF_COUNT_HW_CACHE_OP_MAX
]
386 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
390 [ C(RESULT_ACCESS
) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
391 [ C(RESULT_MISS
) ] = 0x0151, /* L1D.REPL */
394 [ C(RESULT_ACCESS
) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
395 [ C(RESULT_MISS
) ] = 0x0251, /* L1D.M_REPL */
397 [ C(OP_PREFETCH
) ] = {
398 [ C(RESULT_ACCESS
) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
399 [ C(RESULT_MISS
) ] = 0x024e, /* L1D_PREFETCH.MISS */
404 [ C(RESULT_ACCESS
) ] = 0x0380, /* L1I.READS */
405 [ C(RESULT_MISS
) ] = 0x0280, /* L1I.MISSES */
408 [ C(RESULT_ACCESS
) ] = -1,
409 [ C(RESULT_MISS
) ] = -1,
411 [ C(OP_PREFETCH
) ] = {
412 [ C(RESULT_ACCESS
) ] = 0x0,
413 [ C(RESULT_MISS
) ] = 0x0,
418 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
419 [ C(RESULT_ACCESS
) ] = 0x01b7,
420 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
421 [ C(RESULT_MISS
) ] = 0x01b7,
424 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
428 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
429 [ C(RESULT_ACCESS
) ] = 0x01b7,
430 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
431 [ C(RESULT_MISS
) ] = 0x01b7,
433 [ C(OP_PREFETCH
) ] = {
434 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
435 [ C(RESULT_ACCESS
) ] = 0x01b7,
436 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
437 [ C(RESULT_MISS
) ] = 0x01b7,
442 [ C(RESULT_ACCESS
) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
443 [ C(RESULT_MISS
) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
446 [ C(RESULT_ACCESS
) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
447 [ C(RESULT_MISS
) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
449 [ C(OP_PREFETCH
) ] = {
450 [ C(RESULT_ACCESS
) ] = 0x0,
451 [ C(RESULT_MISS
) ] = 0x0,
456 [ C(RESULT_ACCESS
) ] = 0x01c0, /* INST_RETIRED.ANY_P */
457 [ C(RESULT_MISS
) ] = 0x0185, /* ITLB_MISSES.ANY */
460 [ C(RESULT_ACCESS
) ] = -1,
461 [ C(RESULT_MISS
) ] = -1,
463 [ C(OP_PREFETCH
) ] = {
464 [ C(RESULT_ACCESS
) ] = -1,
465 [ C(RESULT_MISS
) ] = -1,
470 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
471 [ C(RESULT_MISS
) ] = 0x03e8, /* BPU_CLEARS.ANY */
474 [ C(RESULT_ACCESS
) ] = -1,
475 [ C(RESULT_MISS
) ] = -1,
477 [ C(OP_PREFETCH
) ] = {
478 [ C(RESULT_ACCESS
) ] = -1,
479 [ C(RESULT_MISS
) ] = -1,
484 [ C(RESULT_ACCESS
) ] = 0x01b7,
485 [ C(RESULT_MISS
) ] = 0x01b7,
488 [ C(RESULT_ACCESS
) ] = 0x01b7,
489 [ C(RESULT_MISS
) ] = 0x01b7,
491 [ C(OP_PREFETCH
) ] = {
492 [ C(RESULT_ACCESS
) ] = 0x01b7,
493 [ C(RESULT_MISS
) ] = 0x01b7,
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
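/*
 * Editor's note -- illustrative only, not kernel code: from userspace,
 * offcore response counting on Nehalem/Westmere uses the
 * OFFCORE_RESPONSE_0 event (raw config 0x01b7) with the request/response
 * mask passed in attr.config1; the extra_reg code later in this file
 * routes config1 into MSR_OFFCORE_RSP_0.  The helper name is made up.
 */
#include <linux/perf_event.h>
#include <string.h>

static void demo_fill_remote_dram_read_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size    = sizeof(*attr);
	attr->type    = PERF_TYPE_RAW;
	attr->config  = 0x01b7;				/* OFFCORE_RESPONSE_0 */
	attr->config1 = NHM_DMND_READ | NHM_REMOTE;	/* demand reads served from remote DRAM */
}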
531 static __initconst
const u64 nehalem_hw_cache_extra_regs
532 [PERF_COUNT_HW_CACHE_MAX
]
533 [PERF_COUNT_HW_CACHE_OP_MAX
]
534 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
538 [ C(RESULT_ACCESS
) ] = NHM_DMND_READ
|NHM_L3_ACCESS
,
539 [ C(RESULT_MISS
) ] = NHM_DMND_READ
|NHM_L3_MISS
,
542 [ C(RESULT_ACCESS
) ] = NHM_DMND_WRITE
|NHM_L3_ACCESS
,
543 [ C(RESULT_MISS
) ] = NHM_DMND_WRITE
|NHM_L3_MISS
,
545 [ C(OP_PREFETCH
) ] = {
546 [ C(RESULT_ACCESS
) ] = NHM_DMND_PREFETCH
|NHM_L3_ACCESS
,
547 [ C(RESULT_MISS
) ] = NHM_DMND_PREFETCH
|NHM_L3_MISS
,
552 [ C(RESULT_ACCESS
) ] = NHM_DMND_READ
|NHM_LOCAL
|NHM_REMOTE
,
553 [ C(RESULT_MISS
) ] = NHM_DMND_READ
|NHM_REMOTE
,
556 [ C(RESULT_ACCESS
) ] = NHM_DMND_WRITE
|NHM_LOCAL
|NHM_REMOTE
,
557 [ C(RESULT_MISS
) ] = NHM_DMND_WRITE
|NHM_REMOTE
,
559 [ C(OP_PREFETCH
) ] = {
560 [ C(RESULT_ACCESS
) ] = NHM_DMND_PREFETCH
|NHM_LOCAL
|NHM_REMOTE
,
561 [ C(RESULT_MISS
) ] = NHM_DMND_PREFETCH
|NHM_REMOTE
,
566 static __initconst
const u64 nehalem_hw_cache_event_ids
567 [PERF_COUNT_HW_CACHE_MAX
]
568 [PERF_COUNT_HW_CACHE_OP_MAX
]
569 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
573 [ C(RESULT_ACCESS
) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
574 [ C(RESULT_MISS
) ] = 0x0151, /* L1D.REPL */
577 [ C(RESULT_ACCESS
) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
578 [ C(RESULT_MISS
) ] = 0x0251, /* L1D.M_REPL */
580 [ C(OP_PREFETCH
) ] = {
581 [ C(RESULT_ACCESS
) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
582 [ C(RESULT_MISS
) ] = 0x024e, /* L1D_PREFETCH.MISS */
587 [ C(RESULT_ACCESS
) ] = 0x0380, /* L1I.READS */
588 [ C(RESULT_MISS
) ] = 0x0280, /* L1I.MISSES */
591 [ C(RESULT_ACCESS
) ] = -1,
592 [ C(RESULT_MISS
) ] = -1,
594 [ C(OP_PREFETCH
) ] = {
595 [ C(RESULT_ACCESS
) ] = 0x0,
596 [ C(RESULT_MISS
) ] = 0x0,
601 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
602 [ C(RESULT_ACCESS
) ] = 0x01b7,
603 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
604 [ C(RESULT_MISS
) ] = 0x01b7,
607 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
611 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
612 [ C(RESULT_ACCESS
) ] = 0x01b7,
613 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
614 [ C(RESULT_MISS
) ] = 0x01b7,
616 [ C(OP_PREFETCH
) ] = {
617 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
618 [ C(RESULT_ACCESS
) ] = 0x01b7,
619 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
620 [ C(RESULT_MISS
) ] = 0x01b7,
625 [ C(RESULT_ACCESS
) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
626 [ C(RESULT_MISS
) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
629 [ C(RESULT_ACCESS
) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
630 [ C(RESULT_MISS
) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
632 [ C(OP_PREFETCH
) ] = {
633 [ C(RESULT_ACCESS
) ] = 0x0,
634 [ C(RESULT_MISS
) ] = 0x0,
639 [ C(RESULT_ACCESS
) ] = 0x01c0, /* INST_RETIRED.ANY_P */
640 [ C(RESULT_MISS
) ] = 0x20c8, /* ITLB_MISS_RETIRED */
643 [ C(RESULT_ACCESS
) ] = -1,
644 [ C(RESULT_MISS
) ] = -1,
646 [ C(OP_PREFETCH
) ] = {
647 [ C(RESULT_ACCESS
) ] = -1,
648 [ C(RESULT_MISS
) ] = -1,
653 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
654 [ C(RESULT_MISS
) ] = 0x03e8, /* BPU_CLEARS.ANY */
657 [ C(RESULT_ACCESS
) ] = -1,
658 [ C(RESULT_MISS
) ] = -1,
660 [ C(OP_PREFETCH
) ] = {
661 [ C(RESULT_ACCESS
) ] = -1,
662 [ C(RESULT_MISS
) ] = -1,
667 [ C(RESULT_ACCESS
) ] = 0x01b7,
668 [ C(RESULT_MISS
) ] = 0x01b7,
671 [ C(RESULT_ACCESS
) ] = 0x01b7,
672 [ C(RESULT_MISS
) ] = 0x01b7,
674 [ C(OP_PREFETCH
) ] = {
675 [ C(RESULT_ACCESS
) ] = 0x01b7,
676 [ C(RESULT_MISS
) ] = 0x01b7,
681 static __initconst
const u64 core2_hw_cache_event_ids
682 [PERF_COUNT_HW_CACHE_MAX
]
683 [PERF_COUNT_HW_CACHE_OP_MAX
]
684 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
688 [ C(RESULT_ACCESS
) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
689 [ C(RESULT_MISS
) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
692 [ C(RESULT_ACCESS
) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
693 [ C(RESULT_MISS
) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
695 [ C(OP_PREFETCH
) ] = {
696 [ C(RESULT_ACCESS
) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
697 [ C(RESULT_MISS
) ] = 0,
702 [ C(RESULT_ACCESS
) ] = 0x0080, /* L1I.READS */
703 [ C(RESULT_MISS
) ] = 0x0081, /* L1I.MISSES */
706 [ C(RESULT_ACCESS
) ] = -1,
707 [ C(RESULT_MISS
) ] = -1,
709 [ C(OP_PREFETCH
) ] = {
710 [ C(RESULT_ACCESS
) ] = 0,
711 [ C(RESULT_MISS
) ] = 0,
716 [ C(RESULT_ACCESS
) ] = 0x4f29, /* L2_LD.MESI */
717 [ C(RESULT_MISS
) ] = 0x4129, /* L2_LD.ISTATE */
720 [ C(RESULT_ACCESS
) ] = 0x4f2A, /* L2_ST.MESI */
721 [ C(RESULT_MISS
) ] = 0x412A, /* L2_ST.ISTATE */
723 [ C(OP_PREFETCH
) ] = {
724 [ C(RESULT_ACCESS
) ] = 0,
725 [ C(RESULT_MISS
) ] = 0,
730 [ C(RESULT_ACCESS
) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
731 [ C(RESULT_MISS
) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
734 [ C(RESULT_ACCESS
) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
735 [ C(RESULT_MISS
) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
737 [ C(OP_PREFETCH
) ] = {
738 [ C(RESULT_ACCESS
) ] = 0,
739 [ C(RESULT_MISS
) ] = 0,
744 [ C(RESULT_ACCESS
) ] = 0x00c0, /* INST_RETIRED.ANY_P */
745 [ C(RESULT_MISS
) ] = 0x1282, /* ITLBMISSES */
748 [ C(RESULT_ACCESS
) ] = -1,
749 [ C(RESULT_MISS
) ] = -1,
751 [ C(OP_PREFETCH
) ] = {
752 [ C(RESULT_ACCESS
) ] = -1,
753 [ C(RESULT_MISS
) ] = -1,
758 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
759 [ C(RESULT_MISS
) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
762 [ C(RESULT_ACCESS
) ] = -1,
763 [ C(RESULT_MISS
) ] = -1,
765 [ C(OP_PREFETCH
) ] = {
766 [ C(RESULT_ACCESS
) ] = -1,
767 [ C(RESULT_MISS
) ] = -1,
772 static __initconst
const u64 atom_hw_cache_event_ids
773 [PERF_COUNT_HW_CACHE_MAX
]
774 [PERF_COUNT_HW_CACHE_OP_MAX
]
775 [PERF_COUNT_HW_CACHE_RESULT_MAX
] =
779 [ C(RESULT_ACCESS
) ] = 0x2140, /* L1D_CACHE.LD */
780 [ C(RESULT_MISS
) ] = 0,
783 [ C(RESULT_ACCESS
) ] = 0x2240, /* L1D_CACHE.ST */
784 [ C(RESULT_MISS
) ] = 0,
786 [ C(OP_PREFETCH
) ] = {
787 [ C(RESULT_ACCESS
) ] = 0x0,
788 [ C(RESULT_MISS
) ] = 0,
793 [ C(RESULT_ACCESS
) ] = 0x0380, /* L1I.READS */
794 [ C(RESULT_MISS
) ] = 0x0280, /* L1I.MISSES */
797 [ C(RESULT_ACCESS
) ] = -1,
798 [ C(RESULT_MISS
) ] = -1,
800 [ C(OP_PREFETCH
) ] = {
801 [ C(RESULT_ACCESS
) ] = 0,
802 [ C(RESULT_MISS
) ] = 0,
807 [ C(RESULT_ACCESS
) ] = 0x4f29, /* L2_LD.MESI */
808 [ C(RESULT_MISS
) ] = 0x4129, /* L2_LD.ISTATE */
811 [ C(RESULT_ACCESS
) ] = 0x4f2A, /* L2_ST.MESI */
812 [ C(RESULT_MISS
) ] = 0x412A, /* L2_ST.ISTATE */
814 [ C(OP_PREFETCH
) ] = {
815 [ C(RESULT_ACCESS
) ] = 0,
816 [ C(RESULT_MISS
) ] = 0,
821 [ C(RESULT_ACCESS
) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
822 [ C(RESULT_MISS
) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
825 [ C(RESULT_ACCESS
) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
826 [ C(RESULT_MISS
) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
828 [ C(OP_PREFETCH
) ] = {
829 [ C(RESULT_ACCESS
) ] = 0,
830 [ C(RESULT_MISS
) ] = 0,
835 [ C(RESULT_ACCESS
) ] = 0x00c0, /* INST_RETIRED.ANY_P */
836 [ C(RESULT_MISS
) ] = 0x0282, /* ITLB.MISSES */
839 [ C(RESULT_ACCESS
) ] = -1,
840 [ C(RESULT_MISS
) ] = -1,
842 [ C(OP_PREFETCH
) ] = {
843 [ C(RESULT_ACCESS
) ] = -1,
844 [ C(RESULT_MISS
) ] = -1,
849 [ C(RESULT_ACCESS
) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
850 [ C(RESULT_MISS
) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
853 [ C(RESULT_ACCESS
) ] = -1,
854 [ C(RESULT_MISS
) ] = -1,
856 [ C(OP_PREFETCH
) ] = {
857 [ C(RESULT_ACCESS
) ] = -1,
858 [ C(RESULT_MISS
) ] = -1,
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Intel Errata AAK100 (model 26)
 * Intel Errata AAP53  (model 30)
 * Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
990 static inline u64
intel_pmu_get_status(void)
994 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS
, status
);
999 static inline void intel_pmu_ack_status(u64 ack
)
1001 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL
, ack
);
1004 static void intel_pmu_disable_fixed(struct hw_perf_event
*hwc
)
1006 int idx
= hwc
->idx
- INTEL_PMC_IDX_FIXED
;
1009 mask
= 0xfULL
<< (idx
* 4);
1011 rdmsrl(hwc
->config_base
, ctrl_val
);
1013 wrmsrl(hwc
->config_base
, ctrl_val
);
1016 static void intel_pmu_disable_event(struct perf_event
*event
)
1018 struct hw_perf_event
*hwc
= &event
->hw
;
1019 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1021 if (unlikely(hwc
->idx
== INTEL_PMC_IDX_FIXED_BTS
)) {
1022 intel_pmu_disable_bts();
1023 intel_pmu_drain_bts_buffer();
1027 cpuc
->intel_ctrl_guest_mask
&= ~(1ull << hwc
->idx
);
1028 cpuc
->intel_ctrl_host_mask
&= ~(1ull << hwc
->idx
);
1031 * must disable before any actual event
1032 * because any event may be combined with LBR
1034 if (intel_pmu_needs_lbr_smpl(event
))
1035 intel_pmu_lbr_disable(event
);
1037 if (unlikely(hwc
->config_base
== MSR_ARCH_PERFMON_FIXED_CTR_CTRL
)) {
1038 intel_pmu_disable_fixed(hwc
);
1042 x86_pmu_disable_event(event
);
1044 if (unlikely(event
->attr
.precise_ip
))
1045 intel_pmu_pebs_disable(event
);
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
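/*
 * Editor's note -- hypothetical helper, for illustration only (not part
 * of the original file): each fixed counter owns a 4-bit nibble in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL, which is what intel_pmu_enable_fixed()
 * above read-modify-writes.  Bit 3 of the nibble requests a PMI on
 * overflow, bit 1 counts ring 3, bit 0 counts ring 0.
 */
static u64 demo_fixed_ctrl_bits(int idx, bool user, bool kernel)
{
	u64 bits = 0x8;			/* PMI on overflow		*/

	if (user)
		bits |= 0x2;		/* count at ring 3 (user)	*/
	if (kernel)
		bits |= 0x1;		/* count at ring 0 (kernel)	*/

	return bits << (idx * 4);	/* one nibble per fixed counter	*/
}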
1079 static void intel_pmu_enable_event(struct perf_event
*event
)
1081 struct hw_perf_event
*hwc
= &event
->hw
;
1082 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1084 if (unlikely(hwc
->idx
== INTEL_PMC_IDX_FIXED_BTS
)) {
1085 if (!__this_cpu_read(cpu_hw_events
.enabled
))
1088 intel_pmu_enable_bts(hwc
->config
);
1092 * must enabled before any actual event
1093 * because any event may be combined with LBR
1095 if (intel_pmu_needs_lbr_smpl(event
))
1096 intel_pmu_lbr_enable(event
);
1098 if (event
->attr
.exclude_host
)
1099 cpuc
->intel_ctrl_guest_mask
|= (1ull << hwc
->idx
);
1100 if (event
->attr
.exclude_guest
)
1101 cpuc
->intel_ctrl_host_mask
|= (1ull << hwc
->idx
);
1103 if (unlikely(hwc
->config_base
== MSR_ARCH_PERFMON_FIXED_CTR_CTRL
)) {
1104 intel_pmu_enable_fixed(hwc
);
1108 if (unlikely(event
->attr
.precise_ip
))
1109 intel_pmu_pebs_enable(event
);
1111 __x86_pmu_enable_event(hwc
, ARCH_PERFMON_EVENTSEL_ENABLE
);
1115 * Save and restart an expired event. Called by NMI contexts,
1116 * so it has to be careful about preempting normal event ops:
1118 int intel_pmu_save_and_restart(struct perf_event
*event
)
1120 x86_perf_event_update(event
);
1121 return x86_perf_event_set_period(event
);
1124 static void intel_pmu_reset(void)
1126 struct debug_store
*ds
= __this_cpu_read(cpu_hw_events
.ds
);
1127 unsigned long flags
;
1130 if (!x86_pmu
.num_counters
)
1133 local_irq_save(flags
);
1135 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1137 for (idx
= 0; idx
< x86_pmu
.num_counters
; idx
++) {
1138 wrmsrl_safe(x86_pmu_config_addr(idx
), 0ull);
1139 wrmsrl_safe(x86_pmu_event_addr(idx
), 0ull);
1141 for (idx
= 0; idx
< x86_pmu
.num_counters_fixed
; idx
++)
1142 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0
+ idx
, 0ull);
1145 ds
->bts_index
= ds
->bts_buffer_base
;
1147 local_irq_restore(flags
);
1151 * This handler is triggered by the local APIC, so the APIC IRQ handling
1154 static int intel_pmu_handle_irq(struct pt_regs
*regs
)
1156 struct perf_sample_data data
;
1157 struct cpu_hw_events
*cpuc
;
1162 cpuc
= &__get_cpu_var(cpu_hw_events
);
1165 * Some chipsets need to unmask the LVTPC in a particular spot
1166 * inside the nmi handler. As a result, the unmasking was pushed
1167 * into all the nmi handlers.
1169 * This handler doesn't seem to have any issues with the unmasking
1170 * so it was left at the top.
1172 apic_write(APIC_LVTPC
, APIC_DM_NMI
);
1174 intel_pmu_disable_all();
1175 handled
= intel_pmu_drain_bts_buffer();
1176 status
= intel_pmu_get_status();
1178 intel_pmu_enable_all(0);
1184 intel_pmu_ack_status(status
);
1185 if (++loops
> 100) {
1186 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1187 perf_event_print_debug();
1192 inc_irq_stat(apic_perf_irqs
);
1194 intel_pmu_lbr_read();
1197 * PEBS overflow sets bit 62 in the global status register
1199 if (__test_and_clear_bit(62, (unsigned long *)&status
)) {
1201 x86_pmu
.drain_pebs(regs
);
1204 for_each_set_bit(bit
, (unsigned long *)&status
, X86_PMC_IDX_MAX
) {
1205 struct perf_event
*event
= cpuc
->events
[bit
];
1209 if (!test_bit(bit
, cpuc
->active_mask
))
1212 if (!intel_pmu_save_and_restart(event
))
1215 perf_sample_data_init(&data
, 0, event
->hw
.last_period
);
1217 if (has_branch_stack(event
))
1218 data
.br_stack
= &cpuc
->lbr_stack
;
1220 if (perf_event_overflow(event
, &data
, regs
))
1221 x86_pmu_stop(event
, 0);
1225 * Repeat if there is more work to be done:
1227 status
= intel_pmu_get_status();
1232 intel_pmu_enable_all(0);
1236 static struct event_constraint
*
1237 intel_bts_constraints(struct perf_event
*event
)
1239 struct hw_perf_event
*hwc
= &event
->hw
;
1240 unsigned int hw_event
, bts_event
;
1242 if (event
->attr
.freq
)
1245 hw_event
= hwc
->config
& INTEL_ARCH_EVENT_MASK
;
1246 bts_event
= x86_pmu
.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS
);
1248 if (unlikely(hw_event
== bts_event
&& hwc
->sample_period
== 1))
1249 return &bts_constraint
;
static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}

static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
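/*
 * Editor's note -- standalone illustration, not kernel code: falling
 * back from the OFFCORE_RSP_0 slot to OFFCORE_RSP_1 means the event
 * select byte changes from 0xb7 (OFFCORE_RESPONSE_0) to 0xbb
 * (OFFCORE_RESPONSE_1) while the umask stays 0x01, which is exactly
 * what intel_fixup_er() does with INTEL_ARCH_EVENT_MASK.  The DEMO_*
 * names are hypothetical.
 */
#define DEMO_ARCH_EVENT_MASK	0xffffULL	/* event select + umask bytes */

static u64 demo_retarget_offcore(u64 config, bool use_rsp_1)
{
	config &= ~DEMO_ARCH_EVENT_MASK;
	config |= use_rsp_1 ? 0x01bb : 0x01b7;
	return config;
}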
1284 * manage allocation of shared extra msr for certain events
1287 * per-cpu: to be shared between the various events on a single PMU
1288 * per-core: per-cpu + shared by HT threads
1290 static struct event_constraint
*
1291 __intel_shared_reg_get_constraints(struct cpu_hw_events
*cpuc
,
1292 struct perf_event
*event
,
1293 struct hw_perf_event_extra
*reg
)
1295 struct event_constraint
*c
= &emptyconstraint
;
1296 struct er_account
*era
;
1297 unsigned long flags
;
1301 * reg->alloc can be set due to existing state, so for fake cpuc we
1302 * need to ignore this, otherwise we might fail to allocate proper fake
1303 * state for this extra reg constraint. Also see the comment below.
1305 if (reg
->alloc
&& !cpuc
->is_fake
)
1306 return NULL
; /* call x86_get_event_constraint() */
1309 era
= &cpuc
->shared_regs
->regs
[idx
];
1311 * we use spin_lock_irqsave() to avoid lockdep issues when
1312 * passing a fake cpuc
1314 raw_spin_lock_irqsave(&era
->lock
, flags
);
1316 if (!atomic_read(&era
->ref
) || era
->config
== reg
->config
) {
1319 * If its a fake cpuc -- as per validate_{group,event}() we
1320 * shouldn't touch event state and we can avoid doing so
1321 * since both will only call get_event_constraints() once
1322 * on each event, this avoids the need for reg->alloc.
1324 * Not doing the ER fixup will only result in era->reg being
1325 * wrong, but since we won't actually try and program hardware
1326 * this isn't a problem either.
1328 if (!cpuc
->is_fake
) {
1329 if (idx
!= reg
->idx
)
1330 intel_fixup_er(event
, idx
);
1333 * x86_schedule_events() can call get_event_constraints()
1334 * multiple times on events in the case of incremental
1335 * scheduling(). reg->alloc ensures we only do the ER
1341 /* lock in msr value */
1342 era
->config
= reg
->config
;
1343 era
->reg
= reg
->reg
;
1346 atomic_inc(&era
->ref
);
1349 * need to call x86_get_event_constraint()
1350 * to check if associated event has constraints
1354 idx
= intel_alt_er(idx
);
1355 if (idx
!= reg
->idx
) {
1356 raw_spin_unlock_irqrestore(&era
->lock
, flags
);
1360 raw_spin_unlock_irqrestore(&era
->lock
, flags
);
1366 __intel_shared_reg_put_constraints(struct cpu_hw_events
*cpuc
,
1367 struct hw_perf_event_extra
*reg
)
1369 struct er_account
*era
;
1372 * Only put constraint if extra reg was actually allocated. Also takes
1373 * care of event which do not use an extra shared reg.
1375 * Also, if this is a fake cpuc we shouldn't touch any event state
1376 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1377 * either since it'll be thrown out.
1379 if (!reg
->alloc
|| cpuc
->is_fake
)
1382 era
= &cpuc
->shared_regs
->regs
[reg
->idx
];
1384 /* one fewer user */
1385 atomic_dec(&era
->ref
);
1387 /* allocate again next time */
1391 static struct event_constraint
*
1392 intel_shared_regs_constraints(struct cpu_hw_events
*cpuc
,
1393 struct perf_event
*event
)
1395 struct event_constraint
*c
= NULL
, *d
;
1396 struct hw_perf_event_extra
*xreg
, *breg
;
1398 xreg
= &event
->hw
.extra_reg
;
1399 if (xreg
->idx
!= EXTRA_REG_NONE
) {
1400 c
= __intel_shared_reg_get_constraints(cpuc
, event
, xreg
);
1401 if (c
== &emptyconstraint
)
1404 breg
= &event
->hw
.branch_reg
;
1405 if (breg
->idx
!= EXTRA_REG_NONE
) {
1406 d
= __intel_shared_reg_get_constraints(cpuc
, event
, breg
);
1407 if (d
== &emptyconstraint
) {
1408 __intel_shared_reg_put_constraints(cpuc
, xreg
);
1415 struct event_constraint
*
1416 x86_get_event_constraints(struct cpu_hw_events
*cpuc
, struct perf_event
*event
)
1418 struct event_constraint
*c
;
1420 if (x86_pmu
.event_constraints
) {
1421 for_each_event_constraint(c
, x86_pmu
.event_constraints
) {
1422 if ((event
->hw
.config
& c
->cmask
) == c
->code
) {
1423 /* hw.flags zeroed at initialization */
1424 event
->hw
.flags
|= c
->flags
;
1430 return &unconstrained
;
1433 static struct event_constraint
*
1434 intel_get_event_constraints(struct cpu_hw_events
*cpuc
, struct perf_event
*event
)
1436 struct event_constraint
*c
;
1438 c
= intel_bts_constraints(event
);
1442 c
= intel_pebs_constraints(event
);
1446 c
= intel_shared_regs_constraints(cpuc
, event
);
1450 return x86_get_event_constraints(cpuc
, event
);
1454 intel_put_shared_regs_event_constraints(struct cpu_hw_events
*cpuc
,
1455 struct perf_event
*event
)
1457 struct hw_perf_event_extra
*reg
;
1459 reg
= &event
->hw
.extra_reg
;
1460 if (reg
->idx
!= EXTRA_REG_NONE
)
1461 __intel_shared_reg_put_constraints(cpuc
, reg
);
1463 reg
= &event
->hw
.branch_reg
;
1464 if (reg
->idx
!= EXTRA_REG_NONE
)
1465 __intel_shared_reg_put_constraints(cpuc
, reg
);
1468 static void intel_put_event_constraints(struct cpu_hw_events
*cpuc
,
1469 struct perf_event
*event
)
1471 event
->hw
.flags
= 0;
1472 intel_put_shared_regs_event_constraints(cpuc
, event
);
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
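/*
 * Editor's note -- illustrative only, not part of the original file:
 * the PEBS-friendly cycles alias built above corresponds to the raw
 * encoding event=0xc0, inv=1, cmask=16, i.e. "cycles retiring fewer
 * than 16 instructions", which is every cycle.  From userspace the same
 * thing can be requested directly via struct perf_event_attr from
 * <linux/perf_event.h>; the helper name is made up.
 */
static void demo_fill_pebs_cycles_alias(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size	 = sizeof(*attr);
	attr->type	 = PERF_TYPE_RAW;
	attr->config	 = 0x00c0		/* INST_RETIRED.ANY_P	*/
			 | (1ULL  << 23)	/* inv			*/
			 | (16ULL << 24);	/* cmask = 16		*/
	attr->precise_ip = 2;			/* ask for PEBS		*/
}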
static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retire
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
1531 static int intel_pmu_hw_config(struct perf_event
*event
)
1533 int ret
= x86_pmu_hw_config(event
);
1538 if (event
->attr
.precise_ip
&& x86_pmu
.pebs_aliases
)
1539 x86_pmu
.pebs_aliases(event
);
1541 if (intel_pmu_needs_lbr_smpl(event
)) {
1542 ret
= intel_pmu_setup_lbr_filter(event
);
1547 if (event
->attr
.type
!= PERF_TYPE_RAW
)
1550 if (!(event
->attr
.config
& ARCH_PERFMON_EVENTSEL_ANY
))
1553 if (x86_pmu
.version
< 3)
1556 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN
))
1559 event
->hw
.config
|= ARCH_PERFMON_EVENTSEL_ANY
;
1564 struct perf_guest_switch_msr
*perf_guest_get_msrs(int *nr
)
1566 if (x86_pmu
.guest_get_msrs
)
1567 return x86_pmu
.guest_get_msrs(nr
);
1571 EXPORT_SYMBOL_GPL(perf_guest_get_msrs
);
1573 static struct perf_guest_switch_msr
*intel_guest_get_msrs(int *nr
)
1575 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1576 struct perf_guest_switch_msr
*arr
= cpuc
->guest_switch_msrs
;
1578 arr
[0].msr
= MSR_CORE_PERF_GLOBAL_CTRL
;
1579 arr
[0].host
= x86_pmu
.intel_ctrl
& ~cpuc
->intel_ctrl_guest_mask
;
1580 arr
[0].guest
= x86_pmu
.intel_ctrl
& ~cpuc
->intel_ctrl_host_mask
;
1582 * If PMU counter has PEBS enabled it is not enough to disable counter
1583 * on a guest entry since PEBS memory write can overshoot guest entry
1584 * and corrupt guest memory. Disabling PEBS solves the problem.
1586 arr
[1].msr
= MSR_IA32_PEBS_ENABLE
;
1587 arr
[1].host
= cpuc
->pebs_enabled
;
1594 static struct perf_guest_switch_msr
*core_guest_get_msrs(int *nr
)
1596 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1597 struct perf_guest_switch_msr
*arr
= cpuc
->guest_switch_msrs
;
1600 for (idx
= 0; idx
< x86_pmu
.num_counters
; idx
++) {
1601 struct perf_event
*event
= cpuc
->events
[idx
];
1603 arr
[idx
].msr
= x86_pmu_config_addr(idx
);
1604 arr
[idx
].host
= arr
[idx
].guest
= 0;
1606 if (!test_bit(idx
, cpuc
->active_mask
))
1609 arr
[idx
].host
= arr
[idx
].guest
=
1610 event
->hw
.config
| ARCH_PERFMON_EVENTSEL_ENABLE
;
1612 if (event
->attr
.exclude_host
)
1613 arr
[idx
].host
&= ~ARCH_PERFMON_EVENTSEL_ENABLE
;
1614 else if (event
->attr
.exclude_guest
)
1615 arr
[idx
].guest
&= ~ARCH_PERFMON_EVENTSEL_ENABLE
;
1618 *nr
= x86_pmu
.num_counters
;
1622 static void core_pmu_enable_event(struct perf_event
*event
)
1624 if (!event
->attr
.exclude_host
)
1625 x86_pmu_enable_event(event
);
1628 static void core_pmu_enable_all(int added
)
1630 struct cpu_hw_events
*cpuc
= &__get_cpu_var(cpu_hw_events
);
1633 for (idx
= 0; idx
< x86_pmu
.num_counters
; idx
++) {
1634 struct hw_perf_event
*hwc
= &cpuc
->events
[idx
]->hw
;
1636 if (!test_bit(idx
, cpuc
->active_mask
) ||
1637 cpuc
->events
[idx
]->attr
.exclude_host
)
1640 __x86_pmu_enable_event(hwc
, ARCH_PERFMON_EVENTSEL_ENABLE
);
1644 PMU_FORMAT_ATTR(event
, "config:0-7" );
1645 PMU_FORMAT_ATTR(umask
, "config:8-15" );
1646 PMU_FORMAT_ATTR(edge
, "config:18" );
1647 PMU_FORMAT_ATTR(pc
, "config:19" );
1648 PMU_FORMAT_ATTR(any
, "config:21" ); /* v3 + */
1649 PMU_FORMAT_ATTR(inv
, "config:23" );
1650 PMU_FORMAT_ATTR(cmask
, "config:24-31" );
1652 static struct attribute
*intel_arch_formats_attr
[] = {
1653 &format_attr_event
.attr
,
1654 &format_attr_umask
.attr
,
1655 &format_attr_edge
.attr
,
1656 &format_attr_pc
.attr
,
1657 &format_attr_inv
.attr
,
1658 &format_attr_cmask
.attr
,
1662 ssize_t
intel_event_sysfs_show(char *page
, u64 config
)
1664 u64 event
= (config
& ARCH_PERFMON_EVENTSEL_EVENT
);
1666 return x86_event_sysfs_show(page
, config
, event
);
1669 static __initconst
const struct x86_pmu core_pmu
= {
1671 .handle_irq
= x86_pmu_handle_irq
,
1672 .disable_all
= x86_pmu_disable_all
,
1673 .enable_all
= core_pmu_enable_all
,
1674 .enable
= core_pmu_enable_event
,
1675 .disable
= x86_pmu_disable_event
,
1676 .hw_config
= x86_pmu_hw_config
,
1677 .schedule_events
= x86_schedule_events
,
1678 .eventsel
= MSR_ARCH_PERFMON_EVENTSEL0
,
1679 .perfctr
= MSR_ARCH_PERFMON_PERFCTR0
,
1680 .event_map
= intel_pmu_event_map
,
1681 .max_events
= ARRAY_SIZE(intel_perfmon_event_map
),
1684 * Intel PMCs cannot be accessed sanely above 32 bit width,
1685 * so we install an artificial 1<<31 period regardless of
1686 * the generic event period:
1688 .max_period
= (1ULL << 31) - 1,
1689 .get_event_constraints
= intel_get_event_constraints
,
1690 .put_event_constraints
= intel_put_event_constraints
,
1691 .event_constraints
= intel_core_event_constraints
,
1692 .guest_get_msrs
= core_guest_get_msrs
,
1693 .format_attrs
= intel_arch_formats_attr
,
1694 .events_sysfs_show
= intel_event_sysfs_show
,
1697 struct intel_shared_regs
*allocate_shared_regs(int cpu
)
1699 struct intel_shared_regs
*regs
;
1702 regs
= kzalloc_node(sizeof(struct intel_shared_regs
),
1703 GFP_KERNEL
, cpu_to_node(cpu
));
1706 * initialize the locks to keep lockdep happy
1708 for (i
= 0; i
< EXTRA_REG_MAX
; i
++)
1709 raw_spin_lock_init(®s
->regs
[i
].lock
);
1716 static int intel_pmu_cpu_prepare(int cpu
)
1718 struct cpu_hw_events
*cpuc
= &per_cpu(cpu_hw_events
, cpu
);
1720 if (!(x86_pmu
.extra_regs
|| x86_pmu
.lbr_sel_map
))
1723 cpuc
->shared_regs
= allocate_shared_regs(cpu
);
1724 if (!cpuc
->shared_regs
)
1730 static void intel_pmu_cpu_starting(int cpu
)
1732 struct cpu_hw_events
*cpuc
= &per_cpu(cpu_hw_events
, cpu
);
1733 int core_id
= topology_core_id(cpu
);
1736 init_debug_store_on_cpu(cpu
);
1738 * Deal with CPUs that don't clear their LBRs on power-up.
1740 intel_pmu_lbr_reset();
1742 cpuc
->lbr_sel
= NULL
;
1744 if (!cpuc
->shared_regs
)
1747 if (!(x86_pmu
.er_flags
& ERF_NO_HT_SHARING
)) {
1748 for_each_cpu(i
, topology_thread_cpumask(cpu
)) {
1749 struct intel_shared_regs
*pc
;
1751 pc
= per_cpu(cpu_hw_events
, i
).shared_regs
;
1752 if (pc
&& pc
->core_id
== core_id
) {
1753 cpuc
->kfree_on_online
= cpuc
->shared_regs
;
1754 cpuc
->shared_regs
= pc
;
1758 cpuc
->shared_regs
->core_id
= core_id
;
1759 cpuc
->shared_regs
->refcnt
++;
1762 if (x86_pmu
.lbr_sel_map
)
1763 cpuc
->lbr_sel
= &cpuc
->shared_regs
->regs
[EXTRA_REG_LBR
];
1766 static void intel_pmu_cpu_dying(int cpu
)
1768 struct cpu_hw_events
*cpuc
= &per_cpu(cpu_hw_events
, cpu
);
1769 struct intel_shared_regs
*pc
;
1771 pc
= cpuc
->shared_regs
;
1773 if (pc
->core_id
== -1 || --pc
->refcnt
== 0)
1775 cpuc
->shared_regs
= NULL
;
1778 fini_debug_store_on_cpu(cpu
);
static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, so we need to flush it on ctxsw.
	 * For now, we simply reset it.
	 */
	intel_pmu_lbr_reset();
}
1793 PMU_FORMAT_ATTR(offcore_rsp
, "config1:0-63");
1795 PMU_FORMAT_ATTR(ldlat
, "config1:0-15");
1797 static struct attribute
*intel_arch3_formats_attr
[] = {
1798 &format_attr_event
.attr
,
1799 &format_attr_umask
.attr
,
1800 &format_attr_edge
.attr
,
1801 &format_attr_pc
.attr
,
1802 &format_attr_any
.attr
,
1803 &format_attr_inv
.attr
,
1804 &format_attr_cmask
.attr
,
1806 &format_attr_offcore_rsp
.attr
, /* XXX do NHM/WSM + SNB breakout */
1807 &format_attr_ldlat
.attr
, /* PEBS load latency */
1811 static __initconst
const struct x86_pmu intel_pmu
= {
1813 .handle_irq
= intel_pmu_handle_irq
,
1814 .disable_all
= intel_pmu_disable_all
,
1815 .enable_all
= intel_pmu_enable_all
,
1816 .enable
= intel_pmu_enable_event
,
1817 .disable
= intel_pmu_disable_event
,
1818 .hw_config
= intel_pmu_hw_config
,
1819 .schedule_events
= x86_schedule_events
,
1820 .eventsel
= MSR_ARCH_PERFMON_EVENTSEL0
,
1821 .perfctr
= MSR_ARCH_PERFMON_PERFCTR0
,
1822 .event_map
= intel_pmu_event_map
,
1823 .max_events
= ARRAY_SIZE(intel_perfmon_event_map
),
1826 * Intel PMCs cannot be accessed sanely above 32 bit width,
1827 * so we install an artificial 1<<31 period regardless of
1828 * the generic event period:
1830 .max_period
= (1ULL << 31) - 1,
1831 .get_event_constraints
= intel_get_event_constraints
,
1832 .put_event_constraints
= intel_put_event_constraints
,
1833 .pebs_aliases
= intel_pebs_aliases_core2
,
1835 .format_attrs
= intel_arch3_formats_attr
,
1836 .events_sysfs_show
= intel_event_sysfs_show
,
1838 .cpu_prepare
= intel_pmu_cpu_prepare
,
1839 .cpu_starting
= intel_pmu_cpu_starting
,
1840 .cpu_dying
= intel_pmu_cpu_dying
,
1841 .guest_get_msrs
= intel_guest_get_msrs
,
1842 .flush_branch_stack
= intel_pmu_flush_branch_stack
,
1845 static __init
void intel_clovertown_quirk(void)
1848 * PEBS is unreliable due to:
1850 * AJ67 - PEBS may experience CPL leaks
1851 * AJ68 - PEBS PMI may be delayed by one event
1852 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
1853 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
1855 * AJ67 could be worked around by restricting the OS/USR flags.
1856 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
1858 * AJ106 could possibly be worked around by not allowing LBR
1859 * usage from PEBS, including the fixup.
1860 * AJ68 could possibly be worked around by always programming
1861 * a pebs_event_reset[0] value and coping with the lost events.
1863 * But taken together it might just make sense to not enable PEBS on
1866 pr_warn("PEBS disabled due to CPU errata\n");
1868 x86_pmu
.pebs_constraints
= NULL
;
1871 static int intel_snb_pebs_broken(int cpu
)
1873 u32 rev
= UINT_MAX
; /* default to broken for unknown models */
1875 switch (cpu_data(cpu
).x86_model
) {
1880 case 45: /* SNB-EP */
1881 switch (cpu_data(cpu
).x86_mask
) {
1882 case 6: rev
= 0x618; break;
1883 case 7: rev
= 0x70c; break;
1887 return (cpu_data(cpu
).microcode
< rev
);
1890 static void intel_snb_check_microcode(void)
1892 int pebs_broken
= 0;
1896 for_each_online_cpu(cpu
) {
1897 if ((pebs_broken
= intel_snb_pebs_broken(cpu
)))
1902 if (pebs_broken
== x86_pmu
.pebs_broken
)
1906 * Serialized by the microcode lock..
1908 if (x86_pmu
.pebs_broken
) {
1909 pr_info("PEBS enabled due to microcode update\n");
1910 x86_pmu
.pebs_broken
= 0;
1912 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
1913 x86_pmu
.pebs_broken
= 1;
1917 static __init
void intel_sandybridge_quirk(void)
1919 x86_pmu
.check_microcode
= intel_snb_check_microcode
;
1920 intel_snb_check_microcode();
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES,		"cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS,		"instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES,		"bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES,	"cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES,		"cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS,	"branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES,		"branch misses" },
};

static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that are reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
1964 __init
int intel_pmu_init(void)
1966 union cpuid10_edx edx
;
1967 union cpuid10_eax eax
;
1968 union cpuid10_ebx ebx
;
1969 struct event_constraint
*c
;
1970 unsigned int unused
;
1973 if (!cpu_has(&boot_cpu_data
, X86_FEATURE_ARCH_PERFMON
)) {
1974 switch (boot_cpu_data
.x86
) {
1976 return p6_pmu_init();
1978 return knc_pmu_init();
1980 return p4_pmu_init();
1986 * Check whether the Architectural PerfMon supports
1987 * Branch Misses Retired hw_event or not.
1989 cpuid(10, &eax
.full
, &ebx
.full
, &unused
, &edx
.full
);
1990 if (eax
.split
.mask_length
< ARCH_PERFMON_EVENTS_COUNT
)
1993 version
= eax
.split
.version_id
;
1997 x86_pmu
= intel_pmu
;
1999 x86_pmu
.version
= version
;
2000 x86_pmu
.num_counters
= eax
.split
.num_counters
;
2001 x86_pmu
.cntval_bits
= eax
.split
.bit_width
;
2002 x86_pmu
.cntval_mask
= (1ULL << eax
.split
.bit_width
) - 1;
2004 x86_pmu
.events_maskl
= ebx
.full
;
2005 x86_pmu
.events_mask_len
= eax
.split
.mask_length
;
2007 x86_pmu
.max_pebs_events
= min_t(unsigned, MAX_PEBS_EVENTS
, x86_pmu
.num_counters
);
2010 * Quirk: v2 perfmon does not report fixed-purpose events, so
2011 * assume at least 3 events:
2014 x86_pmu
.num_counters_fixed
= max((int)edx
.split
.num_counters_fixed
, 3);
2017 * v2 and above have a perf capabilities MSR
2022 rdmsrl(MSR_IA32_PERF_CAPABILITIES
, capabilities
);
2023 x86_pmu
.intel_cap
.capabilities
= capabilities
;
2028 x86_add_quirk(intel_arch_events_quirk
); /* Install first, so it runs last */
2031 * Install the hw-cache-events table:
2033 switch (boot_cpu_data
.x86_model
) {
2034 case 14: /* 65 nm core solo/duo, "Yonah" */
2035 pr_cont("Core events, ");
2038 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2039 x86_add_quirk(intel_clovertown_quirk
);
2040 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2041 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2042 case 29: /* six-core 45 nm xeon "Dunnington" */
2043 memcpy(hw_cache_event_ids
, core2_hw_cache_event_ids
,
2044 sizeof(hw_cache_event_ids
));
2046 intel_pmu_lbr_init_core();
2048 x86_pmu
.event_constraints
= intel_core2_event_constraints
;
2049 x86_pmu
.pebs_constraints
= intel_core2_pebs_event_constraints
;
2050 pr_cont("Core2 events, ");
2053 case 26: /* 45 nm nehalem, "Bloomfield" */
2054 case 30: /* 45 nm nehalem, "Lynnfield" */
2055 case 46: /* 45 nm nehalem-ex, "Beckton" */
2056 memcpy(hw_cache_event_ids
, nehalem_hw_cache_event_ids
,
2057 sizeof(hw_cache_event_ids
));
2058 memcpy(hw_cache_extra_regs
, nehalem_hw_cache_extra_regs
,
2059 sizeof(hw_cache_extra_regs
));
2061 intel_pmu_lbr_init_nhm();
2063 x86_pmu
.event_constraints
= intel_nehalem_event_constraints
;
2064 x86_pmu
.pebs_constraints
= intel_nehalem_pebs_event_constraints
;
2065 x86_pmu
.enable_all
= intel_pmu_nhm_enable_all
;
2066 x86_pmu
.extra_regs
= intel_nehalem_extra_regs
;
2068 x86_pmu
.cpu_events
= nhm_events_attrs
;
		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
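		/*
		 * Editor's note -- illustrative expansion, not the real
		 * X86_CONFIG() macro: the aliases above are plain EVENTSEL
		 * bit patterns.  For the frontend alias, .event=0x0e,
		 * .umask=0x01, .inv=1, .cmask=1 packs to 0x0180010e, i.e.
		 * UOPS_ISSUED.ANY with the invert flag and a counter-mask
		 * of 1 ("cycles issuing fewer than one uop").  The helper
		 * name below is made up.
		 *
		 * static u64 demo_x86_config(u8 event, u8 umask, u8 inv, u8 cmask)
		 * {
		 *	return (u64)event | ((u64)umask << 8) |
		 *	       ((u64)inv << 23) | ((u64)cmask << 24);
		 * }
		 */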
2077 x86_add_quirk(intel_nehalem_quirk
);
2079 pr_cont("Nehalem events, ");
2083 case 38: /* Lincroft */
2084 case 39: /* Penwell */
2085 case 53: /* Cloverview */
2086 case 54: /* Cedarview */
2087 memcpy(hw_cache_event_ids
, atom_hw_cache_event_ids
,
2088 sizeof(hw_cache_event_ids
));
2090 intel_pmu_lbr_init_atom();
2092 x86_pmu
.event_constraints
= intel_gen_event_constraints
;
2093 x86_pmu
.pebs_constraints
= intel_atom_pebs_event_constraints
;
2094 pr_cont("Atom events, ");
2097 case 37: /* 32 nm nehalem, "Clarkdale" */
2098 case 44: /* 32 nm nehalem, "Gulftown" */
2099 case 47: /* 32 nm Xeon E7 */
2100 memcpy(hw_cache_event_ids
, westmere_hw_cache_event_ids
,
2101 sizeof(hw_cache_event_ids
));
2102 memcpy(hw_cache_extra_regs
, nehalem_hw_cache_extra_regs
,
2103 sizeof(hw_cache_extra_regs
));
2105 intel_pmu_lbr_init_nhm();
2107 x86_pmu
.event_constraints
= intel_westmere_event_constraints
;
2108 x86_pmu
.enable_all
= intel_pmu_nhm_enable_all
;
2109 x86_pmu
.pebs_constraints
= intel_westmere_pebs_event_constraints
;
2110 x86_pmu
.extra_regs
= intel_westmere_extra_regs
;
2111 x86_pmu
.er_flags
|= ERF_HAS_RSP_1
;
2113 x86_pmu
.cpu_events
= nhm_events_attrs
;
2115 /* UOPS_ISSUED.STALLED_CYCLES */
2116 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] =
2117 X86_CONFIG(.event
=0x0e, .umask
=0x01, .inv
=1, .cmask
=1);
2118 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2119 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] =
2120 X86_CONFIG(.event
=0xb1, .umask
=0x3f, .inv
=1, .cmask
=1);
2122 pr_cont("Westmere events, ");
2125 case 42: /* SandyBridge */
2126 case 45: /* SandyBridge, "Romley-EP" */
2127 x86_add_quirk(intel_sandybridge_quirk
);
2128 memcpy(hw_cache_event_ids
, snb_hw_cache_event_ids
,
2129 sizeof(hw_cache_event_ids
));
2130 memcpy(hw_cache_extra_regs
, snb_hw_cache_extra_regs
,
2131 sizeof(hw_cache_extra_regs
));
2133 intel_pmu_lbr_init_snb();
2135 x86_pmu
.event_constraints
= intel_snb_event_constraints
;
2136 x86_pmu
.pebs_constraints
= intel_snb_pebs_event_constraints
;
2137 x86_pmu
.pebs_aliases
= intel_pebs_aliases_snb
;
2138 if (boot_cpu_data
.x86_model
== 45)
2139 x86_pmu
.extra_regs
= intel_snbep_extra_regs
;
2141 x86_pmu
.extra_regs
= intel_snb_extra_regs
;
2142 /* all extra regs are per-cpu when HT is on */
2143 x86_pmu
.er_flags
|= ERF_HAS_RSP_1
;
2144 x86_pmu
.er_flags
|= ERF_NO_HT_SHARING
;
2146 x86_pmu
.cpu_events
= snb_events_attrs
;
2148 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2149 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] =
2150 X86_CONFIG(.event
=0x0e, .umask
=0x01, .inv
=1, .cmask
=1);
2151 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
2152 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] =
2153 X86_CONFIG(.event
=0xb1, .umask
=0x01, .inv
=1, .cmask
=1);
2155 pr_cont("SandyBridge events, ");
2157 case 58: /* IvyBridge */
2158 case 62: /* IvyBridge EP */
2159 memcpy(hw_cache_event_ids
, snb_hw_cache_event_ids
,
2160 sizeof(hw_cache_event_ids
));
2161 memcpy(hw_cache_extra_regs
, snb_hw_cache_extra_regs
,
2162 sizeof(hw_cache_extra_regs
));
2164 intel_pmu_lbr_init_snb();
2166 x86_pmu
.event_constraints
= intel_ivb_event_constraints
;
2167 x86_pmu
.pebs_constraints
= intel_ivb_pebs_event_constraints
;
2168 x86_pmu
.pebs_aliases
= intel_pebs_aliases_snb
;
2169 if (boot_cpu_data
.x86_model
== 62)
2170 x86_pmu
.extra_regs
= intel_snbep_extra_regs
;
2172 x86_pmu
.extra_regs
= intel_snb_extra_regs
;
2173 /* all extra regs are per-cpu when HT is on */
2174 x86_pmu
.er_flags
|= ERF_HAS_RSP_1
;
2175 x86_pmu
.er_flags
|= ERF_NO_HT_SHARING
;
2177 x86_pmu
.cpu_events
= snb_events_attrs
;
2179 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2180 intel_perfmon_event_map
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] =
2181 X86_CONFIG(.event
=0x0e, .umask
=0x01, .inv
=1, .cmask
=1);
2183 pr_cont("IvyBridge events, ");
2188 switch (x86_pmu
.version
) {
2190 x86_pmu
.event_constraints
= intel_v1_event_constraints
;
2191 pr_cont("generic architected perfmon v1, ");
2195 * default constraints for v2 and up
2197 x86_pmu
.event_constraints
= intel_gen_event_constraints
;
2198 pr_cont("generic architected perfmon, ");
	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
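	/*
	 * Editor's note -- worked example, not part of the original file,
	 * assuming a PMU with 4 generic and 3 fixed counters (as on
	 * Nehalem/Westmere) and INTEL_PMC_IDX_FIXED == 32:
	 *
	 *	(1 << 4) - 1			= 0x000000000f	(generic counters, bits 0-3)
	 *	((1LL << 3) - 1) << 32		= 0x0700000000	(fixed counters, bits 32-34)
	 *	x86_pmu.intel_ctrl		= 0x070000000f
	 *
	 * which is the value written to MSR_CORE_PERF_GLOBAL_CTRL when
	 * every counter is enabled.
	 */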
2219 if (x86_pmu
.event_constraints
) {
2221 * event on fixed counter2 (REF_CYCLES) only works on this
2222 * counter, so do not extend mask to generic counters
2224 for_each_event_constraint(c
, x86_pmu
.event_constraints
) {
2225 if (c
->cmask
!= X86_RAW_EVENT_MASK
2226 || c
->idxmsk64
== INTEL_PMC_MSK_FIXED_REF_CYCLES
) {
2230 c
->idxmsk64
|= (1ULL << x86_pmu
.num_counters
) - 1;
2231 c
->weight
+= x86_pmu
.num_counters
;