#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC                                  32
#define INTEL_PMC_MAX_FIXED                                     3
#define INTEL_PMC_IDX_FIXED                                    32

#define X86_PMC_IDX_MAX                                        64

#define MSR_ARCH_PERFMON_PERFCTR0                             0xc1
#define MSR_ARCH_PERFMON_PERFCTR1                             0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0                           0x186
#define MSR_ARCH_PERFMON_EVENTSEL1                           0x187

#define ARCH_PERFMON_EVENTSEL_EVENT                     0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK                     0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR                       (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS                        (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE                      (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL               (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT                       (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY                       (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE                    (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV                       (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK                     0xFF000000ULL
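
/*
 * Illustrative sketch (not part of the kernel API): a raw event-select
 * value is composed by OR-ing the fields above. Counting UnHalted Core
 * Cycles (event 0x3c, umask 0x00) in both user and kernel mode, with an
 * interrupt on overflow, could look roughly like:
 *
 *      u64 evtsel = 0x3c                           // event code
 *                 | (0x00 << 8)                    // unit mask
 *                 | ARCH_PERFMON_EVENTSEL_USR
 *                 | ARCH_PERFMON_EVENTSEL_OS
 *                 | ARCH_PERFMON_EVENTSEL_INT
 *                 | ARCH_PERFMON_EVENTSEL_ENABLE;
 *      wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, evtsel);
 */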

#define HSW_IN_TX                                       (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED                          (1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE                  (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY                        (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY                         (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT               37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK                \
        (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT    \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK   \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK              \
        (ARCH_PERFMON_EVENTSEL_EVENT |  \
         ARCH_PERFMON_EVENTSEL_UMASK |  \
         ARCH_PERFMON_EVENTSEL_EDGE  |  \
         ARCH_PERFMON_EVENTSEL_INV   |  \
         ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS                     \
        (ARCH_PERFMON_EVENTSEL_EDGE |           \
         ARCH_PERFMON_EVENTSEL_INV |            \
         ARCH_PERFMON_EVENTSEL_CMASK |          \
         ARCH_PERFMON_EVENTSEL_ANY |            \
         ARCH_PERFMON_EVENTSEL_PIN_CONTROL |    \
         HSW_IN_TX |                            \
         HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK            \
        (X86_RAW_EVENT_MASK          |  \
         AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB         \
        (AMD64_EVENTSEL_EVENT        |  \
         ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS                              4
#define AMD64_NUM_COUNTERS_CORE                         6
#define AMD64_NUM_COUNTERS_NB                           4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL           0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK         (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX         0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
                (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED              6
#define ARCH_PERFMON_EVENTS_COUNT                       7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

union cpuid10_ebx {
        struct {
                unsigned int no_unhalted_core_cycles:1;
                unsigned int no_instructions_retired:1;
                unsigned int no_unhalted_reference_cycles:1;
                unsigned int no_llc_reference:1;
                unsigned int no_llc_misses:1;
                unsigned int no_branch_instruction_retired:1;
                unsigned int no_branch_misses_retired:1;
        } split;
        unsigned int full;
};

union cpuid10_edx {
        struct {
                unsigned int num_counters_fixed:5;
                unsigned int bit_width_fixed:8;
                unsigned int reserved:19;
        } split;
        unsigned int full;
};

struct x86_pmu_capability {
        int             version;
        int             num_counters_gp;
        int             num_counters_fixed;
        int             bit_width_gp;
        int             bit_width_fixed;
        unsigned int    events_mask;
        int             events_mask_len;
};
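
/*
 * Illustrative sketch (not part of the kernel API): the unions above map
 * directly onto the registers returned by CPUID leaf 0xa, e.g.:
 *
 *      union cpuid10_eax eax;
 *      union cpuid10_ebx ebx;
 *      union cpuid10_edx edx;
 *      unsigned int ecx_unused;
 *
 *      cpuid(0xa, &eax.full, &ebx.full, &ecx_unused, &edx.full);
 *      pr_info("PMU v%d: %d GP counters, %d bits wide\n",
 *              eax.split.version_id, eax.split.num_counters,
 *              eax.split.bit_width);
 */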

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0     0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS        (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1     0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES  (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2     0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES  (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES  (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
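
/*
 * Illustrative sketch (not part of the kernel API): each fixed counter
 * owns a 4-bit control field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL; bits
 * 0-1 of the field select ring-0/ring-3 counting and bit 3 enables the
 * PMI. Enabling fixed counter 0 (Instr_Retired.Any) for both user and
 * kernel mode could look roughly like:
 *
 *      u64 ctrl;
 *
 *      rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *      ctrl |= 0x3ULL << (4 * 0);      // counter 0: count rings 0 and 3
 *      wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 */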

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS                         (INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG                          BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF                        BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF                           BIT_ULL(61)
#define GLOBAL_STATUS_ASIF                              BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN                   BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN                       BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI                     BIT_ULL(55)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES              0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL                  (1U<<0)
#define IBS_CAPS_FETCHSAM               (1U<<1)
#define IBS_CAPS_OPSAM                  (1U<<2)
#define IBS_CAPS_RDWROPCNT              (1U<<3)
#define IBS_CAPS_OPCNT                  (1U<<4)
#define IBS_CAPS_BRNTRGT                (1U<<5)
#define IBS_CAPS_OPCNTEXT               (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK          (1U<<7)
#define IBS_CAPS_OPBRNFUSE              (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD           (1U<<9)
#define IBS_CAPS_OPDATA4                (1U<<10)

#define IBS_CAPS_DEFAULT                (IBS_CAPS_AVAIL         \
                                         | IBS_CAPS_FETCHSAM    \
                                         | IBS_CAPS_OPSAM)
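
/*
 * Illustrative sketch (not part of the kernel API): callers typically
 * test the mask returned by get_ibs_caps() (declared below) against
 * these flags, e.g.:
 *
 *      u32 caps = get_ibs_caps();
 *
 *      if (!(caps & IBS_CAPS_AVAIL))
 *              return -ENODEV;         // no IBS on this CPU
 *      if (caps & IBS_CAPS_OPCNT)
 *              use_op_counting = 1;    // can count dispatched ops, not cycles
 */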

/*
 * IBS APIC setup
 */
#define IBSCTL                          0x1cc
#define IBSCTL_LVT_OFFSET_VALID         (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK          0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN       (1ULL<<57)
#define IBS_FETCH_VAL           (1ULL<<49)
#define IBS_FETCH_ENABLE        (1ULL<<48)
#define IBS_FETCH_CNT           0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT       0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT          (0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL          (1ULL<<19)
#define IBS_OP_VAL              (1ULL<<18)
#define IBS_OP_ENABLE           (1ULL<<17)
#define IBS_OP_MAX_CNT          0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT      0x007FFFFFULL   /* not a register bit mask */
#define IBS_RIP_INVALID         (1ULL<<38)
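
/*
 * Illustrative sketch (not part of the kernel API): starting IBS op
 * sampling roughly amounts to programming the max-count field (the
 * hardware appends four low-order zero bits, which is also why the lower
 * 4 bits of the current count are ignored above) and setting the enable
 * bit; sample_period here is a hypothetical period in ops:
 *
 *      u64 op_ctl = ((sample_period >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE;
 *
 *      wrmsrl(MSR_AMD64_IBSOPCTL, op_ctl);
 */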

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT       (1UL << 3)
#define PERF_EFLAGS_VM          (1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)   perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 of the flags word to pass exact information, see
 * perf_misc_flags and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)         {       \
        (regs)->ip = (__ip);                                    \
        (regs)->bp = caller_frame_pointer();                    \
        (regs)->cs = __KERNEL_CS;                               \
        (regs)->flags = 0;                                      \
        asm volatile(                                           \
                _ASM_MOV "%%"_ASM_SP ", %0\n"                   \
                : "=m" ((regs)->sp)                             \
                :: "memory"                                     \
        );                                                      \
}

struct perf_guest_switch_msr {
        unsigned msr;
        u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
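
/*
 * Illustrative sketch (not part of the kernel API): a hypervisor such as
 * KVM walks the array returned by perf_guest_get_msrs() around VM-entry
 * and VM-exit, loading the .guest values on entry and restoring the
 * .host values on exit, conceptually:
 *
 *      int i, nr;
 *      struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *      for (i = 0; i < nr; i++)
 *              wrmsrl(msrs[i].msr, msrs[i].guest);     // on VM-entry
 */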
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
        *nr = 0;
        return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
        memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#ifdef CONFIG_CPU_SUP_INTEL
 extern void intel_pt_handle_vmx(int on);
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
#else
 static inline void amd_pmu_enable_virt(void) { }
 static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */