perf, arch: Cleanup perf-pmu init vs lockup-detector
[GitHub/mt8127/android_kernel_alcatel_ttab.git] arch/x86/include/asm/perf_event.h
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC			32
#define X86_PMC_MAX_FIXED			3

#define X86_PMC_IDX_GENERIC			0
#define X86_PMC_IDX_FIXED			32
#define X86_PMC_IDX_MAX				64

#define MSR_ARCH_PERFMON_PERFCTR0		0xc1
#define MSR_ARCH_PERFMON_PERFCTR1		0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0		0x186
#define MSR_ARCH_PERFMON_EVENTSEL1		0x187

#define ARCH_PERFMON_EVENTSEL_EVENT		0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK		0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR		(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS		(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE		(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_INT		(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY		(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE		(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV		(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK		0xFF000000ULL

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
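
/*
 * Example (illustrative sketch only, not used by this header): programming
 * generic counter 0 to count unhalted core cycles in both user and kernel
 * mode, using the architectural encodings defined just below and
 * rdmsrl()/wrmsrl() from <asm/msr.h>:
 *
 *	u64 val, count;
 *
 *	val = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL   |
 *	      ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK |
 *	      ARCH_PERFMON_EVENTSEL_USR |
 *	      ARCH_PERFMON_EVENTSEL_OS  |
 *	      ARCH_PERFMON_EVENTSEL_ENABLE;
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, val);
 *	...
 *	rdmsrl(MSR_ARCH_PERFMON_PERFCTR0, count);
 */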

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
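
/*
 * Example (sketch of what the PMU init code does): enumerating the PMU
 * via CPUID leaf 0xA with the unions above; cpuid() is from
 * <asm/processor.h>:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_edx edx;
 *	unsigned int ebx, unused;
 *
 *	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 *
 * eax.split.version_id == 0 means no architectural PerfMon at all;
 * otherwise eax.split.num_counters generic counters of
 * eax.split.bit_width bits are available, plus (from version 2 on)
 * edx.split.num_counters_fixed fixed counters. Note that a *set* bit in
 * EBX means the corresponding architectural event is NOT available,
 * which is why ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT above is
 * checked for being clear.
 */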

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL		0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS		(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES		(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES		(X86_PMC_IDX_FIXED + 2)
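
/*
 * Example (minimal sketch): enabling fixed counter 0. Each fixed counter
 * owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL: bit 0 enables
 * ring-0 counting, bit 1 ring-3 counting, bit 3 the PMI on overflow. The
 * counter must also be enabled globally in MSR_CORE_PERF_GLOBAL_CTRL
 * (<asm/msr-index.h>), where fixed counter n is bit 32 + n:
 *
 *	u64 ctrl, global;
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl | 0x3);
 *	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global);
 *	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global | (1ULL << 32));
 */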

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS		(X86_PMC_IDX_FIXED + 16)

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
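
/*
 * Example (sketch, with max_cnt standing in for the desired sampling
 * period and MSR_AMD64_IBSOPCTL coming from <asm/msr-index.h>): starting
 * IBS op sampling. The hardware implies the low 4 bits of the maximum
 * count as zero, so the period is programmed shifted right by 4;
 * IBS_OP_CNT_CTL selects counting dispatched ops rather than cycles:
 *
 *	u64 ctl;
 *
 *	ctl  = (max_cnt >> 4) & IBS_OP_MAX_CNT;
 *	ctl |= IBS_OP_CNT_CTL | IBS_OP_ENABLE;
 *	wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
 */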

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET			0

/*
 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
 * This flag is otherwise unused and ABI specified to be 0, so nobody should
 * care what we do with it.
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
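/*
 * Defining perf_misc_flags() as itself tells the generic code in
 * <linux/perf_event.h> (which tests #ifndef perf_misc_flags) that this
 * architecture supplies its own implementation instead of the default:
 */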
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)	{	\
	(regs)->ip = (__ip);				\
	(regs)->bp = caller_frame_pointer();		\
	(regs)->cs = __KERNEL_CS;			\
	(regs)->flags = 0;				\
}
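
/*
 * Typical use (sketch): the generic perf_fetch_caller_regs() wrapper in
 * <linux/perf_event.h> snapshots a minimal pt_regs at an event's call
 * site, roughly:
 *
 *	struct pt_regs regs;
 *
 *	perf_arch_fetch_caller_regs(&regs, CALLER_ADDR0);
 *
 * where CALLER_ADDR0 (<linux/ftrace.h>) is the caller's return address.
 */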

#else
static inline void perf_events_lapic_init(void)	{ }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */