#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/insn.h>

#include "perf_event.h"

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE
/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
        u32 flags, ip;
        u32 ax, bx, cx, dx;
        u32 si, di, bp, sp;
};

*/

struct pebs_record_core {
        u64 flags, ip;
        u64 ax, bx, cx, dx;
        u64 si, di, bp, sp;
        u64 r8, r9, r10, r11;
        u64 r12, r13, r14, r15;
};

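/*
 * PEBS record layout used from Nehalem on: the core fields plus a
 * status word of applicable counters and what the SDM describes as
 * the data linear address (dla), data source encoding (dse) and
 * load latency (lat).
 */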
struct pebs_record_nhm {
        u64 flags, ip;
        u64 ax, bx, cx, dx;
        u64 si, di, bp, sp;
        u64 r8, r9, r10, r11;
        u64 r12, r13, r14, r15;
        u64 status, dla, dse, lat;
};

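/*
 * Point MSR_IA32_DS_AREA on @cpu at its debug store, so the hardware
 * knows where to deposit BTS and PEBS records.
 */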
void init_debug_store_on_cpu(int cpu)
{
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

        if (!ds)
                return;

        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
                     (u32)((u64)(unsigned long)ds),
                     (u32)((u64)(unsigned long)ds >> 32));
}

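/* Clear MSR_IA32_DS_AREA on @cpu. */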
void fini_debug_store_on_cpu(int cpu)
{
        if (!per_cpu(cpu_hw_events, cpu).ds)
                return;

        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

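/*
 * Allocate a node-local PEBS buffer for @cpu and hook it up to the
 * debug store.  The interrupt threshold is programmed at a single
 * record, so every PEBS write raises a PMI.
 */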
static int alloc_pebs_buffer(int cpu)
{
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
        int node = cpu_to_node(cpu);
        int max, thresh = 1; /* always use a single PEBS record */
        void *buffer;

        if (!x86_pmu.pebs)
                return 0;

        buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
        if (unlikely(!buffer))
                return -ENOMEM;

        max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

        ds->pebs_buffer_base = (u64)(unsigned long)buffer;
        ds->pebs_index = ds->pebs_buffer_base;
        ds->pebs_absolute_maximum = ds->pebs_buffer_base +
                max * x86_pmu.pebs_record_size;

        ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
                thresh * x86_pmu.pebs_record_size;

        return 0;
}

static void release_pebs_buffer(int cpu)
{
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

        if (!ds || !x86_pmu.pebs)
                return;

        kfree((void *)(unsigned long)ds->pebs_buffer_base);
        ds->pebs_buffer_base = 0;
}

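/*
 * Allocate a node-local BTS buffer for @cpu.  Unlike PEBS, the
 * interrupt threshold sits a sixteenth of the buffer from the end,
 * so the PMI fires shortly before the buffer would overflow.
 */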
static int alloc_bts_buffer(int cpu)
{
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
        int node = cpu_to_node(cpu);
        int max, thresh;
        void *buffer;

        if (!x86_pmu.bts)
                return 0;

        buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
        if (unlikely(!buffer))
                return -ENOMEM;

        max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
        thresh = max / 16;

        ds->bts_buffer_base = (u64)(unsigned long)buffer;
        ds->bts_index = ds->bts_buffer_base;
        ds->bts_absolute_maximum = ds->bts_buffer_base +
                max * BTS_RECORD_SIZE;
        ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
                thresh * BTS_RECORD_SIZE;

        return 0;
}

static void release_bts_buffer(int cpu)
{
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

        if (!ds || !x86_pmu.bts)
                return;

        kfree((void *)(unsigned long)ds->bts_buffer_base);
        ds->bts_buffer_base = 0;
}

static int alloc_ds_buffer(int cpu)
{
        int node = cpu_to_node(cpu);
        struct debug_store *ds;

        ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
        if (unlikely(!ds))
                return -ENOMEM;

        per_cpu(cpu_hw_events, cpu).ds = ds;

        return 0;
}

static void release_ds_buffer(int cpu)
{
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

        if (!ds)
                return;

        per_cpu(cpu_hw_events, cpu).ds = NULL;
        kfree(ds);
}

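/*
 * Tear down the DS area MSR on all online CPUs and free the per-cpu
 * BTS, PEBS and debug store allocations.
 */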
void release_ds_buffers(void)
{
        int cpu;

        if (!x86_pmu.bts && !x86_pmu.pebs)
                return;

        get_online_cpus();
        for_each_online_cpu(cpu)
                fini_debug_store_on_cpu(cpu);

        for_each_possible_cpu(cpu) {
                release_pebs_buffer(cpu);
                release_bts_buffer(cpu);
                release_ds_buffer(cpu);
        }
        put_online_cpus();
}

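/*
 * Allocate the debug store plus BTS and PEBS buffers for every
 * possible CPU.  BTS and PEBS fail independently: if one of them
 * cannot be fully allocated it is released again and only the other
 * is activated; the DS area MSR is only programmed when at least one
 * of the two is usable.
 */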
void reserve_ds_buffers(void)
{
        int bts_err = 0, pebs_err = 0;
        int cpu;

        x86_pmu.bts_active = 0;
        x86_pmu.pebs_active = 0;

        if (!x86_pmu.bts && !x86_pmu.pebs)
                return;

        if (!x86_pmu.bts)
                bts_err = 1;

        if (!x86_pmu.pebs)
                pebs_err = 1;

        get_online_cpus();

        for_each_possible_cpu(cpu) {
                if (alloc_ds_buffer(cpu)) {
                        bts_err = 1;
                        pebs_err = 1;
                }

                if (!bts_err && alloc_bts_buffer(cpu))
                        bts_err = 1;

                if (!pebs_err && alloc_pebs_buffer(cpu))
                        pebs_err = 1;

                if (bts_err && pebs_err)
                        break;
        }

        if (bts_err) {
                for_each_possible_cpu(cpu)
                        release_bts_buffer(cpu);
        }

        if (pebs_err) {
                for_each_possible_cpu(cpu)
                        release_pebs_buffer(cpu);
        }

        if (bts_err && pebs_err) {
                for_each_possible_cpu(cpu)
                        release_ds_buffer(cpu);
        } else {
                if (x86_pmu.bts && !bts_err)
                        x86_pmu.bts_active = 1;

                if (x86_pmu.pebs && !pebs_err)
                        x86_pmu.pebs_active = 1;

                for_each_online_cpu(cpu)
                        init_debug_store_on_cpu(cpu);
        }

        put_online_cpus();
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
        EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

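/*
 * Turn on branch tracing: set TR/BTS/BTINT in DEBUGCTL so branches
 * are logged to the DS area, masking out kernel or user branches
 * when the event config excludes that ring.
 */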
void intel_pmu_enable_bts(u64 config)
{
        unsigned long debugctlmsr;

        debugctlmsr = get_debugctlmsr();

        debugctlmsr |= DEBUGCTLMSR_TR;
        debugctlmsr |= DEBUGCTLMSR_BTS;
        debugctlmsr |= DEBUGCTLMSR_BTINT;

        if (!(config & ARCH_PERFMON_EVENTSEL_OS))
                debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

        if (!(config & ARCH_PERFMON_EVENTSEL_USR))
                debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

        update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        unsigned long debugctlmsr;

        if (!cpuc->ds)
                return;

        debugctlmsr = get_debugctlmsr();

        debugctlmsr &=
                ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
                  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

        update_debugctlmsr(debugctlmsr);
}

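/*
 * Flush all BTS records accumulated in the DS buffer into the perf
 * ring buffer, one sample per branch record, and reset the buffer.
 */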
int intel_pmu_drain_bts_buffer(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
        struct bts_record {
                u64 from;
                u64 to;
                u64 flags;
        };
        struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
        struct bts_record *at, *top;
        struct perf_output_handle handle;
        struct perf_event_header header;
        struct perf_sample_data data;
        struct pt_regs regs;

        if (!event)
                return 0;

        if (!x86_pmu.bts_active)
                return 0;

        at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
        top = (struct bts_record *)(unsigned long)ds->bts_index;

        if (top <= at)
                return 0;

        memset(&regs, 0, sizeof(regs));

        ds->bts_index = ds->bts_buffer_base;

        perf_sample_data_init(&data, 0, event->hw.last_period);

        /*
         * Prepare a generic sample, i.e. fill in the invariant fields.
         * We will overwrite the from and to address before we output
         * the sample.
         */
        perf_prepare_sample(&header, &data, event, &regs);

        if (perf_output_begin(&handle, event, header.size * (top - at)))
                return 1;

        for (; at < top; at++) {
                data.ip = at->from;
                data.addr = at->to;

                perf_output_sample(&handle, &header, &data, event);
        }

        perf_output_end(&handle);

        /* There's new data available. */
        event->hw.interrupts++;
        event->pending_kill = POLL_IN;
        return 1;
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
        INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
        INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
        INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
        INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
        INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
        EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
        INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
        INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
        INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
        EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
        INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
        INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
        INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
        INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
        INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
        EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
        INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
        INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
        INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
        EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
        INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
        INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
        INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
        EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
        INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
        INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
        INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
        EVENT_CONSTRAINT_END
};

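/*
 * Find the PEBS constraint matching a precise event.  Returns NULL
 * for non-precise events, the matching constraint if one exists, or
 * the empty constraint to reject an unsupported precise event.
 */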
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
        struct event_constraint *c;

        if (!event->attr.precise_ip)
                return NULL;

        if (x86_pmu.pebs_constraints) {
                for_each_event_constraint(c, x86_pmu.pebs_constraints) {
                        if ((event->hw.config & c->cmask) == c->code)
                                return c;
                }
        }

        return &emptyconstraint;
}

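/*
 * When PEBS is active the PMI is raised by the PEBS assist once the
 * DS interrupt threshold is reached, so the counter's own overflow
 * interrupt is masked out of the event select.
 */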
void intel_pmu_pebs_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

        cpuc->pebs_enabled |= 1ULL << hwc->idx;
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
        if (cpuc->enabled)
                wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

        hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}

void intel_pmu_pebs_enable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (cpuc->pebs_enabled)
                wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (cpuc->pebs_enabled)
                wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

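/*
 * A trap-like PEBS assist reports the address of the instruction
 * *after* the one that caused the event.  To report an exact IP,
 * start from the last branch target recorded in LBR entry 0 and
 * decode forward until we reach the reported address; the previous
 * instruction is the one that actually overflowed the counter.
 */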
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        unsigned long from = cpuc->lbr_entries[0].from;
        unsigned long old_to, to = cpuc->lbr_entries[0].to;
        unsigned long ip = regs->ip;
        int is_64bit = 0;

        /*
         * We don't need to fixup if the PEBS assist is fault like
         */
        if (!x86_pmu.intel_cap.pebs_trap)
                return 1;

        /*
         * No LBR entry, no basic block, no rewinding
         */
        if (!cpuc->lbr_stack.nr || !from || !to)
                return 0;

        /*
         * Basic blocks should never cross user/kernel boundaries
         */
        if (kernel_ip(ip) != kernel_ip(to))
                return 0;

        /*
         * unsigned math, either ip is before the start (impossible) or
         * the basic block is larger than 1 page (sanity)
         */
        if ((ip - to) > PAGE_SIZE)
                return 0;

        /*
         * We sampled a branch insn, rewind using the LBR stack
         */
        if (ip == to) {
                set_linear_ip(regs, from);
                return 1;
        }

        do {
                struct insn insn;
                u8 buf[MAX_INSN_SIZE];
                void *kaddr;

                old_to = to;
                if (!kernel_ip(ip)) {
                        int bytes, size = MAX_INSN_SIZE;

                        bytes = copy_from_user_nmi(buf, (void __user *)to, size);
                        if (bytes != size)
                                return 0;

                        kaddr = buf;
                } else
                        kaddr = (void *)to;

#ifdef CONFIG_X86_64
                is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
                insn_init(&insn, kaddr, is_64bit);
                insn_get_length(&insn);
                to += insn.length;
        } while (to < ip);

        if (to == ip) {
                set_linear_ip(regs, old_to);
                return 1;
        }

        /*
         * Even though we decoded the basic block, the instruction stream
         * never matched the given IP, either the TO or the IP got corrupted.
         */
        return 0;
}

static void __intel_pmu_pebs_event(struct perf_event *event,
                                   struct pt_regs *iregs, void *__pebs)
{
        /*
         * We cast to pebs_record_core since that is a subset of
         * both formats and we don't use the other fields in this
         * routine.
         */
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct pebs_record_core *pebs = __pebs;
        struct perf_sample_data data;
        struct pt_regs regs;

        if (!intel_pmu_save_and_restart(event))
                return;

        perf_sample_data_init(&data, 0, event->hw.last_period);

        /*
         * We use the interrupt regs as a base because the PEBS record
         * does not contain a full regs set, specifically it seems to
         * lack segment descriptors, which get used by things like
         * user_mode().
         *
         * In the simple case fix up only the IP and BP,SP regs, for
         * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
         * A possible PERF_SAMPLE_REGS will have to transfer all regs.
         */
        regs = *iregs;
        regs.flags = pebs->flags;
        set_linear_ip(&regs, pebs->ip);
        regs.bp = pebs->bp;
        regs.sp = pebs->sp;

        if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
                regs.flags |= PERF_EFLAGS_EXACT;
        else
                regs.flags &= ~PERF_EFLAGS_EXACT;

        if (has_branch_stack(event))
                data.br_stack = &cpuc->lbr_stack;

        if (perf_event_overflow(event, &data, &regs))
                x86_pmu_stop(event, 0);
}

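/*
 * Drain the PEBS buffer on Core 2 style hardware, where PEBS is only
 * available on PMC0 and records carry no status bits: only the most
 * recent record needs to be consumed.
 */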
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
        struct perf_event *event = cpuc->events[0]; /* PMC0 only */
        struct pebs_record_core *at, *top;
        int n;

        if (!x86_pmu.pebs_active)
                return;

        at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
        top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

        /*
         * Whatever else happens, drain the thing
         */
        ds->pebs_index = ds->pebs_buffer_base;

        if (!test_bit(0, cpuc->active_mask))
                return;

        WARN_ON_ONCE(!event);

        if (!event->attr.precise_ip)
                return;

        n = top - at;
        if (n <= 0)
                return;

        /*
         * Should not happen, we program the threshold at 1 and do not
         * set a reset value.
         */
        WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
        at += n - 1;

        __intel_pmu_pebs_event(event, iregs, at);
}

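/*
 * Drain the PEBS buffer on Nehalem and later: each record carries a
 * status word of applicable counters, which is used to route the
 * record to the right event.
 */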
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
        struct pebs_record_nhm *at, *top;
        struct perf_event *event = NULL;
        u64 status = 0;
        int bit, n;

        if (!x86_pmu.pebs_active)
                return;

        at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
        top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

        ds->pebs_index = ds->pebs_buffer_base;

        n = top - at;
        if (n <= 0)
                return;

        /*
         * Should not happen, we program the threshold at 1 and do not
         * set a reset value.
         */
        WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);

        for ( ; at < top; at++) {
                for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
                        event = cpuc->events[bit];
                        if (!test_bit(bit, cpuc->active_mask))
                                continue;

                        WARN_ON_ONCE(!event);

                        if (!event->attr.precise_ip)
                                continue;

                        if (__test_and_set_bit(bit, (unsigned long *)&status))
                                continue;

                        break;
                }

                if (!event || bit >= x86_pmu.max_pebs_events)
                        continue;

                __intel_pmu_pebs_event(event, iregs, at);
        }
}

/*
 * BTS, PEBS probe and setup
 */

void intel_ds_init(void)
{
        /*
         * No support for 32bit formats
         */
        if (!boot_cpu_has(X86_FEATURE_DTES64))
                return;

        x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
        x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
        if (x86_pmu.pebs) {
                char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
                int format = x86_pmu.intel_cap.pebs_format;

                switch (format) {
                case 0:
                        printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
                        x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
                        x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
                        break;

                case 1:
                        printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
                        x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
                        x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
                        break;

                default:
                        printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
                        x86_pmu.pebs = 0;
                }
        }
}

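/*
 * Re-program MSR_IA32_DS_AREA from the saved per-cpu debug store
 * pointer; the MSR does not survive a suspend/resume cycle.
 */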
void perf_restore_debug_store(void)
{
        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

        if (!x86_pmu.bts && !x86_pmu.pebs)
                return;

        wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}