Commit | Line | Data |
---|---|---|
a072738e CG |
1 | /* |
2 | * Netburst Performance Events (P4, old Xeon) | |
3 | * | |
4 | * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org> | |
5 | * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com> | |
6 | * | |
7 | * For licencing details see kernel-base/COPYING | |
8 | */ | |
9 | ||
10 | #ifdef CONFIG_CPU_SUP_INTEL | |
11 | ||
12 | #include <asm/perf_event_p4.h> | |
13 | ||
#define P4_CNTR_LIMIT 3
/*
 * array indices: 0,1 - HT threads, used with HT enabled cpu
 */
struct p4_event_bind {
	unsigned int opcode;			/* Event code and ESCR selector */
	unsigned int escr_msr[2];		/* ESCR MSR for this event */
	/*
	 * Candidate counter indices per HT thread, -1 terminated.
	 *
	 * Must be *signed* char: callers (see p4_next_cntr) read an entry
	 * into an int and compare it against -1.  With a plain unsigned
	 * char the -1 sentinel is stored as 255 and the comparison can
	 * never match, so the terminator is silently ignored.
	 */
	signed char cntr[2][P4_CNTR_LIMIT];	/* counter index (offset), -1 on absence */
};

/* PEBS/vert metric MSR values for a synthesized cache event */
struct p4_cache_event_bind {
	unsigned int metric_pebs;
	unsigned int metric_vert;
};
28 | ||
/*
 * Expand one P4_CACHE__<name> slot: ties the generalized cache event
 * index to its PEBS metric and PEBS_MATRIX_VERT values, which
 * p4_pmu_enable_event() writes into the corresponding MSRs.
 */
#define P4_GEN_CACHE_EVENT_BIND(name)		\
	[P4_CACHE__##name] = {			\
		.metric_pebs = P4_PEBS__##name,	\
		.metric_vert = P4_VERT__##name,	\
	}

/* indexed by the cache-event field unpacked from hwc->config */
static struct p4_cache_event_bind p4_cache_event_bind_map[] = {
	P4_GEN_CACHE_EVENT_BIND(1stl_cache_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(2ndl_cache_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(dtlb_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(dtlb_store_miss_retired),
};
41 | ||
/*
 * Note that we don't use CCCR1 here, there is an
 * exception for P4_BSQ_ALLOCATION but we just have
 * no workaround
 *
 * consider this binding as resources which particular
 * event may borrow, it doesn't contain EventMask,
 * Tags and friends -- they are left to a caller
 *
 * Each entry binds one Netburst event to: its opcode (event code +
 * ESCR selector), the ESCR MSR usable from HT thread 0/1 respectively,
 * and the candidate counter indices per thread (-1 terminated, up to
 * P4_CNTR_LIMIT entries) used by the scheduler in p4_next_cntr().
 */
static struct p4_event_bind p4_event_bind_map[] = {
	[P4_EVENT_TC_DELIVER_MODE] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_BPU_FETCH_REQUEST] = {
		.opcode		= P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
		.escr_msr	= { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_ITLB_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
		.escr_msr	= { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_MEMORY_CANCEL] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MEMORY_COMPLETE] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
		.escr_msr	= { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_LOAD_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_STORE_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MOB_LOAD_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
		.escr_msr	= { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_PAGE_WALK_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
		.escr_msr	= { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_CACHE_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ALLOCATION] = {
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_FSB_DATA_ACTIVITY] = {
		.opcode		= P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_ALLOCATION] = {		/* shared ESCR, broken CCCR1 */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
		.cntr		= { {0, -1, -1}, {1, -1, -1} },
	},
	[P4_EVENT_BSQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_SSE_INPUT_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_64BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_128BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_X87_FP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_FP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_TC_MISC] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MISC),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_GLOBAL_POWER_EVENTS] = {
		.opcode		= P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_TC_MS_XFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MS_XFER),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_UOP_QUEUE_WRITES] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
		/*
		 * NOTE(review): both HT slots map to TBPU_ESCR0 here while
		 * RETIRED_BRANCH_TYPE below uses ESCR0/ESCR1 -- confirm the
		 * duplication is intentional against the SDM
		 */
		.escr_msr	= { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RESOURCE_STALL] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESOURCE_STALL),
		.escr_msr	= { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_WC_BUFFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_WC_BUFFER),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_B2B_CYCLES] = {
		.opcode		= P4_OPCODE(P4_EVENT_B2B_CYCLES),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BNR] = {
		.opcode		= P4_OPCODE(P4_EVENT_BNR),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_SNOOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SNOOP),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_RESPONSE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESPONSE),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_FRONT_END_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_EXECUTION_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_REPLAY_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_REPLAY_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOPS_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOPS_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOP_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_TYPE),
		.escr_msr	= { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_X87_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_ASSIST),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MACHINE_CLEAR] = {
		.opcode		= P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_COMPLETED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
};
a072738e | 283 | |
/*
 * Build a packed config for a synthesized cache event: the ESCR half
 * carries the event code + emask bit, the CCCR half carries the cache
 * event index plus the ESCR select derived from the event opcode.
 */
#define P4_GEN_CACHE_EVENT(event, bit, cache_event)			  \
	p4_config_pack_escr(P4_ESCR_EVENT(event)			| \
			    P4_ESCR_EMASK_BIT(event, bit))		| \
	p4_config_pack_cccr(cache_event					| \
			    P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
cb7d6b50 | 289 | |
/*
 * Generalized cache events for Netburst: only load-miss style events are
 * expressible (via the REPLAY/ITLB_REFERENCE events); slots left at 0x0
 * are unsupported, -1 marks combinations that make no sense.  Copied
 * into hw_cache_event_ids by p4_pmu_init().
 */
static __initconst const u64 p4_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__1stl_cache_load_miss_retired),
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__2ndl_cache_load_miss_retired),
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__dtlb_load_miss_retired),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__dtlb_store_miss_retired),
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
						P4_CACHE__itlb_reference_hit),
		[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
						P4_CACHE__itlb_reference_miss),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};
338 | ||
/*
 * Packed ESCR/CCCR configs for the generic PERF_COUNT_HW_* events,
 * indexed by the generic event id; p4_pmu_event_map() adds the ESCR
 * select bits derived from the event's opcode on top of these.
 */
static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
	/* non-halted CPU clocks */
	[PERF_COUNT_HW_CPU_CYCLES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),

	/*
	 * retired instructions
	 * in a sake of simplicity we don't use the FSB tagging
	 */
	[PERF_COUNT_HW_INSTRUCTIONS] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),

	/* cache hits */
	[PERF_COUNT_HW_CACHE_REFERENCES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),

	/* cache misses */
	[PERF_COUNT_HW_CACHE_MISSES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),

	/* branch instructions retired */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),

	/* mispredicted branches retired */
	[PERF_COUNT_HW_BRANCH_MISSES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),

	/* bus ready clocks (cpu is driving #DRDY_DRV\#DRDY_OWN): */
	[PERF_COUNT_HW_BUS_CYCLES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN))	|
	p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
};
391 | ||
d814f301 CG |
392 | static struct p4_event_bind *p4_config_get_bind(u64 config) |
393 | { | |
394 | unsigned int evnt = p4_config_unpack_event(config); | |
395 | struct p4_event_bind *bind = NULL; | |
396 | ||
397 | if (evnt < ARRAY_SIZE(p4_event_bind_map)) | |
398 | bind = &p4_event_bind_map[evnt]; | |
399 | ||
400 | return bind; | |
401 | } | |
402 | ||
a072738e CG |
403 | static u64 p4_pmu_event_map(int hw_event) |
404 | { | |
d814f301 CG |
405 | struct p4_event_bind *bind; |
406 | unsigned int esel; | |
a072738e CG |
407 | u64 config; |
408 | ||
d814f301 CG |
409 | if (hw_event > ARRAY_SIZE(p4_general_events)) { |
410 | printk_once(KERN_ERR "P4 PMU: Bad index: %i\n", hw_event); | |
a072738e CG |
411 | return 0; |
412 | } | |
a072738e | 413 | |
d814f301 CG |
414 | config = p4_general_events[hw_event]; |
415 | bind = p4_config_get_bind(config); | |
416 | esel = P4_OPCODE_ESEL(bind->opcode); | |
417 | config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel)); | |
a072738e | 418 | |
a072738e CG |
419 | return config; |
420 | } | |
421 | ||
/*
 * Validate and finish a perf event's P4 configuration: build the default
 * ESCR/CCCR halves for the current cpu, tag the HT bit when running on
 * the sibling thread, and for raw events merge in the caller-supplied
 * HT-compatible ESCR/CCCR bits before the generic perfctr setup.
 */
static int p4_hw_config(struct perf_event *event)
{
	int cpu = raw_smp_processor_id();
	u32 escr, cccr;

	/*
	 * the reason we use cpu that early is that: if we get scheduled
	 * first time on the same cpu -- we will not need swap thread
	 * specific flags in config (and will save some cpu cycles)
	 */

	cccr = p4_default_cccr_conf(cpu);
	escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
					 event->attr.exclude_user);
	event->hw.config = p4_config_pack_escr(escr) |
			   p4_config_pack_cccr(cccr);

	if (p4_ht_active() && p4_ht_thread(cpu))
		event->hw.config = p4_set_ht_bit(event->hw.config);

	/* non-raw events are fully described by the defaults above */
	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	/*
	 * We don't control raw events so it's up to the caller
	 * to pass sane values (and we don't count the thread number
	 * on HT machine but allow HT-compatible specifics to be
	 * passed on)
	 *
	 * XXX: HT wide things should check perf_paranoid_cpu() &&
	 *      CAP_SYS_ADMIN
	 */
	event->hw.config |= event->attr.config &
		(p4_config_pack_escr(P4_ESCR_MASK_HT) |
		 p4_config_pack_cccr(P4_CCCR_MASK_HT));

	return x86_setup_perfctr(event);
}
460 | ||
/*
 * Clear a stale overflow flag in the counter's CCCR.  The read result
 * itself is discarded; only the OVF bit matters.  Without this the PMI
 * would be re-asserted as soon as the counter is re-enabled.
 */
static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
	unsigned long dummy;

	rdmsrl(hwc->config_base + hwc->idx, dummy);
	if (dummy & P4_CCCR_OVF) {
		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
			((u64)dummy) & ~P4_CCCR_OVF);
	}
}
471 | ||
/*
 * Stop the counter by rewriting its CCCR with ENABLE, OVF and the
 * reserved bits stripped from the event's cached CCCR value.
 */
static inline void p4_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * If event gets disabled while counter is in overflowed
	 * state we need to clear P4_CCCR_OVF, otherwise interrupt get
	 * asserted again and again
	 */
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
		(u64)(p4_config_unpack_cccr(hwc->config)) &
			~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
485 | ||
486 | static void p4_pmu_disable_all(void) | |
487 | { | |
488 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | |
489 | int idx; | |
490 | ||
948b1bb8 | 491 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
a072738e CG |
492 | struct perf_event *event = cpuc->events[idx]; |
493 | if (!test_bit(idx, cpuc->active_mask)) | |
494 | continue; | |
495 | p4_pmu_disable_event(event); | |
496 | } | |
497 | } | |
498 | ||
/*
 * Program the hardware for one event: select the ESCR for the current
 * HT thread, force the real event code into the ESCR value, write any
 * cache-event metric MSRs, then enable the counter via its CCCR.
 * Ordering matters: the ESCR must be written before the CCCR ENABLE.
 */
static void p4_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int thread = p4_ht_config_thread(hwc->config);
	u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
	unsigned int idx = p4_config_unpack_event(hwc->config);
	unsigned int idx_cache = p4_config_unpack_cache_event(hwc->config);
	struct p4_event_bind *bind;
	struct p4_cache_event_bind *bind_cache;
	u64 escr_addr, cccr;

	bind = &p4_event_bind_map[idx];
	escr_addr = (u64)bind->escr_msr[thread];

	/*
	 * - we dont support cascaded counters yet
	 * - and counter 1 is broken (erratum)
	 */
	WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
	WARN_ON_ONCE(hwc->idx == 1);

	/* we need a real Event value */
	escr_conf &= ~P4_ESCR_EVENT_MASK;
	escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));

	cccr = p4_config_unpack_cccr(hwc->config);

	/*
	 * it could be Cache event so that we need to
	 * set metrics into additional MSRs
	 */
	BUILD_BUG_ON(P4_CACHE__MAX > P4_CCCR_CACHE_OPS_MASK);
	if (idx_cache > P4_CACHE__NONE &&
	    idx_cache < ARRAY_SIZE(p4_cache_event_bind_map)) {
		bind_cache = &p4_cache_event_bind_map[idx_cache];
		(void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind_cache->metric_pebs);
		(void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind_cache->metric_vert);
	}

	(void)checking_wrmsrl(escr_addr, escr_conf);
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
				(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
542 | ||
11164cd4 | 543 | static void p4_pmu_enable_all(int added) |
a072738e CG |
544 | { |
545 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | |
546 | int idx; | |
547 | ||
948b1bb8 | 548 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
a072738e CG |
549 | struct perf_event *event = cpuc->events[idx]; |
550 | if (!test_bit(idx, cpuc->active_mask)) | |
551 | continue; | |
552 | p4_pmu_enable_event(event); | |
553 | } | |
554 | } | |
555 | ||
/*
 * PMI handler: walk all active counters, detect overflowed ones (high
 * bit of the updated count clear), re-arm their periods and push
 * samples out.  Returns non-zero when at least one overflow was handled
 * so the NMI is consumed.
 */
static int p4_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		/*
		 * FIXME: Redundant call, actually not needed
		 * but just to check if we're screwed
		 */
		p4_pmu_clear_cccr_ovf(hwc);

		/* high bit still set means the counter has not wrapped */
		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;
		if (perf_event_overflow(event, 1, &data, regs))
			p4_pmu_disable_event(event);
	}

	if (handled) {
		/* p4 quirk: unmask it again */
		apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
		inc_irq_stat(apic_perf_irqs);
	}

	return handled;
}
610 | ||
/*
 * swap thread specific fields according to a thread
 * we are going to run on
 *
 * Rewrites the OVF_PMI (CCCR) and OS/USR (ESCR) bits from the T0 set to
 * the T1 set or back, and records the active thread in P4_CONFIG_HT, so
 * the packed config matches the HT sibling the event migrated to.
 */
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
	u32 escr, cccr;

	/*
	 * we either lucky and continue on same cpu or no HT support
	 */
	if (!p4_should_swap_ts(hwc->config, cpu))
		return;

	/*
	 * the event is migrated from an another logical
	 * cpu, so we need to swap thread specific flags
	 */

	escr = p4_config_unpack_escr(hwc->config);
	cccr = p4_config_unpack_cccr(hwc->config);

	if (p4_ht_thread(cpu)) {
		/* moving onto thread 1: T0 bits become T1 bits */
		cccr &= ~P4_CCCR_OVF_PMI_T0;
		cccr |= P4_CCCR_OVF_PMI_T1;
		if (escr & P4_ESCR_T0_OS) {
			escr &= ~P4_ESCR_T0_OS;
			escr |= P4_ESCR_T1_OS;
		}
		if (escr & P4_ESCR_T0_USR) {
			escr &= ~P4_ESCR_T0_USR;
			escr |= P4_ESCR_T1_USR;
		}
		hwc->config = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config |= P4_CONFIG_HT;
	} else {
		/* moving onto thread 0: T1 bits become T0 bits */
		cccr &= ~P4_CCCR_OVF_PMI_T1;
		cccr |= P4_CCCR_OVF_PMI_T0;
		if (escr & P4_ESCR_T1_OS) {
			escr &= ~P4_ESCR_T1_OS;
			escr |= P4_ESCR_T0_OS;
		}
		if (escr & P4_ESCR_T1_USR) {
			escr &= ~P4_ESCR_T1_USR;
			escr |= P4_ESCR_T0_USR;
		}
		hwc->config = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config &= ~P4_CONFIG_HT;
	}
}
663 | ||
/*
 * ESCRs are not sequential in memory so we need a map.
 * Index into this table is the dense ESCR id used by the per-ESCR
 * reservation bitmap in p4_pmu_schedule_events(); the value is the
 * actual MSR address (looked up by p4_get_escr_idx()).
 */
static const unsigned int p4_escr_map[ARCH_P4_TOTAL_ESCR] = {
	MSR_P4_ALF_ESCR0,	/*  0 */
	MSR_P4_ALF_ESCR1,	/*  1 */
	MSR_P4_BPU_ESCR0,	/*  2 */
	MSR_P4_BPU_ESCR1,	/*  3 */
	MSR_P4_BSU_ESCR0,	/*  4 */
	MSR_P4_BSU_ESCR1,	/*  5 */
	MSR_P4_CRU_ESCR0,	/*  6 */
	MSR_P4_CRU_ESCR1,	/*  7 */
	MSR_P4_CRU_ESCR2,	/*  8 */
	MSR_P4_CRU_ESCR3,	/*  9 */
	MSR_P4_CRU_ESCR4,	/* 10 */
	MSR_P4_CRU_ESCR5,	/* 11 */
	MSR_P4_DAC_ESCR0,	/* 12 */
	MSR_P4_DAC_ESCR1,	/* 13 */
	MSR_P4_FIRM_ESCR0,	/* 14 */
	MSR_P4_FIRM_ESCR1,	/* 15 */
	MSR_P4_FLAME_ESCR0,	/* 16 */
	MSR_P4_FLAME_ESCR1,	/* 17 */
	MSR_P4_FSB_ESCR0,	/* 18 */
	MSR_P4_FSB_ESCR1,	/* 19 */
	MSR_P4_IQ_ESCR0,	/* 20 */
	MSR_P4_IQ_ESCR1,	/* 21 */
	MSR_P4_IS_ESCR0,	/* 22 */
	MSR_P4_IS_ESCR1,	/* 23 */
	MSR_P4_ITLB_ESCR0,	/* 24 */
	MSR_P4_ITLB_ESCR1,	/* 25 */
	MSR_P4_IX_ESCR0,	/* 26 */
	MSR_P4_IX_ESCR1,	/* 27 */
	MSR_P4_MOB_ESCR0,	/* 28 */
	MSR_P4_MOB_ESCR1,	/* 29 */
	MSR_P4_MS_ESCR0,	/* 30 */
	MSR_P4_MS_ESCR1,	/* 31 */
	MSR_P4_PMH_ESCR0,	/* 32 */
	MSR_P4_PMH_ESCR1,	/* 33 */
	MSR_P4_RAT_ESCR0,	/* 34 */
	MSR_P4_RAT_ESCR1,	/* 35 */
	MSR_P4_SAAT_ESCR0,	/* 36 */
	MSR_P4_SAAT_ESCR1,	/* 37 */
	MSR_P4_SSU_ESCR0,	/* 38 */
	MSR_P4_SSU_ESCR1,	/* 39 */
	MSR_P4_TBPU_ESCR0,	/* 40 */
	MSR_P4_TBPU_ESCR1,	/* 41 */
	MSR_P4_TC_ESCR0,	/* 42 */
	MSR_P4_TC_ESCR1,	/* 43 */
	MSR_P4_U2L_ESCR0,	/* 44 */
	MSR_P4_U2L_ESCR1,	/* 45 */
};
713 | ||
714 | static int p4_get_escr_idx(unsigned int addr) | |
715 | { | |
716 | unsigned int i; | |
717 | ||
718 | for (i = 0; i < ARRAY_SIZE(p4_escr_map); i++) { | |
719 | if (addr == p4_escr_map[i]) | |
720 | return i; | |
721 | } | |
722 | ||
723 | return -1; | |
724 | } | |
725 | ||
d814f301 CG |
726 | static int p4_next_cntr(int thread, unsigned long *used_mask, |
727 | struct p4_event_bind *bind) | |
728 | { | |
729 | int i = 0, j; | |
730 | ||
731 | for (i = 0; i < P4_CNTR_LIMIT; i++) { | |
732 | j = bind->cntr[thread][i++]; | |
733 | if (j == -1 || !test_bit(j, used_mask)) | |
734 | return j; | |
735 | } | |
736 | ||
737 | return -1; | |
738 | } | |
739 | ||
/*
 * Assign hardware counters to the n collected events: events already
 * placed on this cpu keep their counter; others get the next free one
 * from their binding, with a per-ESCR reservation so two events never
 * share an ESCR.  Returns 0 on full assignment, -ENOSPC otherwise.
 *
 * NOTE(review): p4_config_get_bind() can return NULL and
 * p4_get_escr_idx() can return -1; neither is checked before use here
 * -- confirm all configs reaching this point are pre-validated.
 */
static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long escr_mask[BITS_TO_LONGS(ARCH_P4_TOTAL_ESCR)];
	int cpu = raw_smp_processor_id();
	struct hw_perf_event *hwc;
	struct p4_event_bind *bind;
	unsigned int i, thread, num;
	int cntr_idx, escr_idx;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
	bitmap_zero(escr_mask, ARCH_P4_TOTAL_ESCR);

	/* num counts the events still unassigned */
	for (i = 0, num = n; i < n; i++, num--) {

		hwc = &cpuc->event_list[i]->hw;
		thread = p4_ht_thread(cpu);
		bind = p4_config_get_bind(hwc->config);
		escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);

		/* already placed on this thread: keep its counter */
		if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
			cntr_idx = hwc->idx;
			if (assign)
				assign[i] = hwc->idx;
			goto reserve;
		}

		cntr_idx = p4_next_cntr(thread, used_mask, bind);
		if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
			goto done;

		p4_pmu_swap_config_ts(hwc, cpu);
		if (assign)
			assign[i] = cntr_idx;
reserve:
		set_bit(cntr_idx, used_mask);
		set_bit(escr_idx, escr_mask);
	}

done:
	return num ? -ENOSPC : 0;
}
782 | ||
/* Netburst PMU description installed into x86_pmu by p4_pmu_init(). */
static __initconst const struct x86_pmu p4_pmu = {
	.name			= "Netburst P4/Xeon",
	.handle_irq		= p4_pmu_handle_irq,
	.disable_all		= p4_pmu_disable_all,
	.enable_all		= p4_pmu_enable_all,
	.enable			= p4_pmu_enable_event,
	.disable		= p4_pmu_disable_event,
	.eventsel		= MSR_P4_BPU_CCCR0,
	.perfctr		= MSR_P4_BPU_PERFCTR0,
	.event_map		= p4_pmu_event_map,
	.max_events		= ARRAY_SIZE(p4_general_events),
	.get_event_constraints	= x86_get_event_constraints,
	/*
	 * IF HT disabled we may need to use all
	 * ARCH_P4_MAX_CCCR counters simultaneously
	 * though leave it restricted at moment assuming
	 * HT is on
	 */
	.num_counters		= ARCH_P4_MAX_CCCR,
	.apic			= 1,
	.cntval_bits		= 40,
	.cntval_mask		= (1ULL << 40) - 1,
	.max_period		= (1ULL << 39) - 1,
	.hw_config		= p4_hw_config,
	.schedule_events	= p4_pmu_schedule_events,
};
809 | ||
/*
 * Probe and install the Netburst PMU: bail out unless the cpu reports
 * the required capability in MSR_IA32_MISC_ENABLE, then publish the
 * cache-event table and switch x86_pmu over to p4_pmu.
 */
static __init int p4_pmu_init(void)
{
	unsigned int low, high;

	/* If we get stripped -- indexing fails */
	BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	/*
	 * bit 7 of MSR_IA32_MISC_ENABLE -- presumably the "performance
	 * monitoring available" flag; TODO confirm against the SDM
	 */
	if (!(low & (1 << 7))) {
		pr_cont("unsupported Netburst CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
		sizeof(hw_cache_event_ids));

	pr_cont("Netburst events, ");

	x86_pmu = p4_pmu;

	return 0;
}
833 | ||
834 | #endif /* CONFIG_CPU_SUP_INTEL */ |