1 #include <linux/module.h>
2 #include <linux/slab.h>
4 #include <linux/perf_event.h>
5 #include "perf_event.h"
/* Maximum length of a registered uncore PMU name, including the NUL. */
7 #define UNCORE_PMU_NAME_LEN 32
8 #define UNCORE_BOX_HASH_SIZE 8
/*
 * Uncore counters have no overflow interrupt; a periodic hrtimer folds
 * each counter into a 64-bit software accumulator before it can wrap.
 */
10 #define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)
/* Pseudo event-select value that routes an event to the fixed counter. */
12 #define UNCORE_FIXED_EVENT 0xff
/* Counter index layout: generic counters first, then the single fixed one. */
13 #define UNCORE_PMC_IDX_MAX_GENERIC 8
14 #define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
15 #define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)
/* Constraint with all 8 event-code bits significant (cmask 0xff). */
17 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
19 /* SNB event control */
/* Bit layout of a SandyBridge uncore perfevtsel MSR. */
20 #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
21 #define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
22 #define SNB_UNC_CTL_EDGE_DET (1 << 18)
23 #define SNB_UNC_CTL_EN (1 << 22)
24 #define SNB_UNC_CTL_INVERT (1 << 23)
/* SNB uses a 5-bit counter mask; NHM widens it to the full byte. */
25 #define SNB_UNC_CTL_CMASK_MASK 0x1f000000
26 #define NHM_UNC_CTL_CMASK_MASK 0xff000000
27 #define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)
/* All config bits userspace may set in attr.config on SNB. */
29 #define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
30 SNB_UNC_CTL_UMASK_MASK | \
31 SNB_UNC_CTL_EDGE_DET | \
32 SNB_UNC_CTL_INVERT | \
33 SNB_UNC_CTL_CMASK_MASK)
/* Same as above for Nehalem, differing only in the cmask width. */
35 #define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
36 SNB_UNC_CTL_UMASK_MASK | \
37 SNB_UNC_CTL_EDGE_DET | \
38 SNB_UNC_CTL_INVERT | \
39 NHM_UNC_CTL_CMASK_MASK)
41 /* SNB global control register */
/* MSR addresses; NOTE SNB and NHM share 0x391 for the global control. */
42 #define SNB_UNC_PERF_GLOBAL_CTL 0x391
43 #define SNB_UNC_FIXED_CTR_CTRL 0x394
44 #define SNB_UNC_FIXED_CTR 0x395
46 /* SNB uncore global control */
/* Low 4 bits enable per-core counting (quad-core client parts). */
47 #define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)
48 #define SNB_UNC_GLOBAL_CTL_EN (1 << 29)
50 /* SNB Cbo register */
/* Base MSRs of Cbox 0; each further Cbox is MSR_OFFSET higher. */
51 #define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
52 #define SNB_UNC_CBO_0_PER_CTR0 0x706
53 #define SNB_UNC_CBO_MSR_OFFSET 0x10
55 /* NHM global control register */
/* NHM swaps the roles of 0x394/0x395 relative to SNB above. */
56 #define NHM_UNC_PERF_GLOBAL_CTL 0x391
57 #define NHM_UNC_FIXED_CTR 0x394
58 #define NHM_UNC_FIXED_CTR_CTRL 0x395
60 /* NHM uncore global control */
/* Bits 0-7 enable the 8 generic counters, bit 32 the fixed counter. */
61 #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)
62 #define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)
64 /* NHM uncore register */
65 #define NHM_UNC_PERFEVTSEL0 0x3c0
66 #define NHM_UNC_UNCORE_PMC0 0x3b0
68 /* SNB-EP Box level control */
/* Per-box control bits: reset control/counter state, freeze, freeze-enable. */
69 #define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
70 #define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
71 #define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
72 #define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
/* Value written at init: reset everything and arm the freeze mechanism. */
73 #define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
74 SNBEP_PMON_BOX_CTL_RST_CTRS | \
75 SNBEP_PMON_BOX_CTL_FRZ_EN)
76 /* SNB-EP event control */
77 #define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
78 #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
79 #define SNBEP_PMON_CTL_RST (1 << 17)
80 #define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
81 #define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */
82 #define SNBEP_PMON_CTL_EN (1 << 22)
83 #define SNBEP_PMON_CTL_INVERT (1 << 23)
/* "TRESH" (sic, upstream spelling) = the 8-bit threshold/cmask field. */
84 #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
/* Default valid-config mask for SNB-EP generic boxes. */
85 #define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
86 SNBEP_PMON_CTL_UMASK_MASK | \
87 SNBEP_PMON_CTL_EDGE_DET | \
88 SNBEP_PMON_CTL_INVERT | \
89 SNBEP_PMON_CTL_TRESH_MASK)
91 /* SNB-EP Ubox event control */
/* The Ubox threshold field is only 5 bits wide. */
92 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
93 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
94 (SNBEP_PMON_CTL_EV_SEL_MASK | \
95 SNBEP_PMON_CTL_UMASK_MASK | \
96 SNBEP_PMON_CTL_EDGE_DET | \
97 SNBEP_PMON_CTL_INVERT | \
98 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* Cbox adds a TID-filter enable on top of the generic mask. */
100 #define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
101 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
102 SNBEP_CBO_PMON_CTL_TID_EN)
104 /* SNB-EP PCU event control */
/* PCU replaces the umask with an occupancy-counter select + its own bits. */
105 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
106 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
107 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
108 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
109 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
110 (SNBEP_PMON_CTL_EV_SEL_MASK | \
111 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
112 SNBEP_PMON_CTL_EDGE_DET | \
113 SNBEP_PMON_CTL_INVERT | \
114 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
115 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
116 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
118 /* SNB-EP pci control register */
/* Offsets below are into PCI config space of the box's PCI device. */
119 #define SNBEP_PCI_PMON_BOX_CTL 0xf4
120 #define SNBEP_PCI_PMON_CTL0 0xd8
121 /* SNB-EP pci counter register */
122 #define SNBEP_PCI_PMON_CTR0 0xa0
124 /* SNB-EP home agent register */
/* HA filter registers: 64-bit address match split across two 32-bit regs. */
125 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
126 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
127 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
128 /* SNB-EP memory controller register */
/* "CHy" = per-channel; same layout for each IMC channel device. */
129 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
130 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
131 /* SNB-EP QPI register */
/* QPI packet match/mask pairs, again split into 32-bit halves. */
132 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
133 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
134 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
135 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
137 /* SNB-EP Ubox register */
/* Remaining boxes are MSR-based; addresses are MSR numbers. */
138 #define SNBEP_U_MSR_PMON_CTR0 0xc16
139 #define SNBEP_U_MSR_PMON_CTL0 0xc10
141 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
142 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
144 /* SNB-EP Cbo register */
145 #define SNBEP_C0_MSR_PMON_CTR0 0xd16
146 #define SNBEP_C0_MSR_PMON_CTL0 0xd10
147 #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
148 #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
149 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f
/* Each successive Cbox sits 0x20 MSRs above the previous one. */
150 #define SNBEP_CBO_MSR_OFFSET 0x20
152 /* SNB-EP PCU register */
153 #define SNBEP_PCU_MSR_PMON_CTR0 0xc36
154 #define SNBEP_PCU_MSR_PMON_CTL0 0xc30
155 #define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
156 #define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
157 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
/* Free-running core C-state residency counters (not programmable PMCs). */
158 #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
159 #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
/* Forward declarations so the structs below can reference each other. */
161 struct intel_uncore_ops;
162 struct intel_uncore_pmu;
163 struct intel_uncore_box;
164 struct uncore_event_desc;
/*
 * Description of one uncore box type (e.g. Cbox, PCU, IMC channel).
 * NOTE(review): several fields (counter counts, register bases, masks)
 * are elided from this excerpt of the file.
 */
166 struct intel_uncore_type {
179 unsigned num_shared_regs:8;
/* Set when the fixed counter exists only on the first box instance. */
180 unsigned single_fixed:1;
/* "unconstrainted" is the historical upstream spelling — do not rename. */
181 struct event_constraint unconstrainted;
182 struct event_constraint *constraints;
183 struct intel_uncore_pmu *pmus;
/* Register-access callbacks (MSR vs PCI variants) for this box type. */
184 struct intel_uncore_ops *ops;
185 struct uncore_event_desc *event_descs;
186 const struct attribute_group *attr_groups[3];
/* Slot 0 of attr_groups is always the format group by convention. */
189 #define format_group attr_groups[0]
/*
 * Per-box-type register access and constraint callbacks.  disable_box,
 * enable_box, hw_config, get_constraint and put_constraint are optional
 * (callers check for NULL); the event enable/disable/read hooks are
 * invoked unconditionally by the wrappers further down.
 */
191 struct intel_uncore_ops {
192 void (*init_box)(struct intel_uncore_box *);
193 void (*disable_box)(struct intel_uncore_box *);
194 void (*enable_box)(struct intel_uncore_box *);
195 void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
196 void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
197 u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
/* Extra per-event setup/validation; returns 0 or a negative errno. */
198 int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
199 struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
200 struct perf_event *);
201 void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
/*
 * One registered perf PMU instance of a given box type.
 * NOTE(review): some fields (including pmu_idx, used by the MSR address
 * helpers below) are elided from this excerpt.
 */
204 struct intel_uncore_pmu {
206 char name[UNCORE_PMU_NAME_LEN];
209 struct intel_uncore_type *type;
/* Per-CPU pointer to the box instance this CPU should use. */
210 struct intel_uncore_box ** __percpu box;
211 struct list_head box_list;
/* Extra (shared) register state; body elided from this excerpt. */
214 struct intel_uncore_extra_reg {
/*
 * Runtime state of one physical uncore box instance.
 * NOTE(review): a few leading fields are elided from this excerpt.
 */
220 struct intel_uncore_box {
222 int n_active; /* number of active events */
224 int cpu; /* cpu to collect events */
/* events[] is indexed by counter idx; event_list[] by assignment order. */
227 struct perf_event *events[UNCORE_PMC_IDX_MAX];
228 struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
229 unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
230 u64 tags[UNCORE_PMC_IDX_MAX];
/* Non-NULL only for PCI-based boxes (SNB-EP HA/IMC/QPI etc.). */
231 struct pci_dev *pci_dev;
232 struct intel_uncore_pmu *pmu;
/* Periodic timer that folds hardware counters before they overflow. */
233 struct hrtimer hrtimer;
234 struct list_head list;
/* Trailing variable-length array, sized by type->num_shared_regs. */
235 struct intel_uncore_extra_reg shared_regs[0];
/* Bit number in box->flags: set once init_box() has run (see below). */
238 #define UNCORE_BOX_FLAG_INITIATED 0
/* Sysfs description of one named uncore event (shows its config string). */
240 struct uncore_event_desc {
241 struct kobj_attribute attr;
/* Builds a uncore_event_desc initializer; read-only (0444) sysfs file. */
245 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
247 .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
/*
 * Defines a sysfs "format" attribute that prints a fixed string, e.g.
 * the bit layout of a config field.  The BUILD_BUG_ON guards against a
 * format string that could not fit in the sysfs page buffer.
 */
251 #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
252 static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
253 struct kobj_attribute *attr, \
256 BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
257 return sprintf(page, _format "\n"); \
259 static struct kobj_attribute format_attr_##_var = \
260 __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
/* sysfs show() for INTEL_UNCORE_EVENT_DESC entries: print the config string. */
263 static ssize_t uncore_event_show(struct kobject *kobj,
264 struct kobj_attribute *attr, char *buf)
/* Recover the containing descriptor from its embedded attribute. */
266 struct uncore_event_desc *event =
267 container_of(attr, struct uncore_event_desc, attr);
268 return sprintf(buf, "%s", event->config);
/*
 * PCI-based boxes: the per-type register values are PCI config-space
 * offsets and are the same for every box instance (the instances are
 * distinguished by their pci_dev, not by an address offset).
 */
271 static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
273 return box->pmu->type->box_ctl;
276 static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
278 return box->pmu->type->fixed_ctl;
281 static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
283 return box->pmu->type->fixed_ctr;
/* Control registers are 4 bytes apart in PCI config space... */
287 unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
289 return idx * 4 + box->pmu->type->event_ctl;
/* ...while counters are 64-bit, hence 8 bytes apart. */
293 unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
295 return idx * 8 + box->pmu->type->perf_ctr;
/*
 * MSR-based boxes: each box instance's registers sit msr_offset MSRs
 * above the previous instance, so the address is base + offset*pmu_idx.
 * box_ctl/fixed_ctl return 0 when the box type has no such register
 * (the "return 0" line is elided from this excerpt of the file).
 */
299 unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
301 if (!box->pmu->type->box_ctl)
303 return box->pmu->type->box_ctl +
304 box->pmu->type->msr_offset * box->pmu->pmu_idx;
308 unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
310 if (!box->pmu->type->fixed_ctl)
312 return box->pmu->type->fixed_ctl +
313 box->pmu->type->msr_offset * box->pmu->pmu_idx;
317 unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
319 return box->pmu->type->fixed_ctr +
320 box->pmu->type->msr_offset * box->pmu->pmu_idx;
/* MSR event-control/counter registers are consecutive MSR numbers (idx + base). */
324 unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
326 return idx + box->pmu->type->event_ctl +
327 box->pmu->type->msr_offset * box->pmu->pmu_idx;
331 unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
333 return idx + box->pmu->type->perf_ctr +
334 box->pmu->type->msr_offset * box->pmu->pmu_idx;
/*
 * Generic accessors that dispatch to the PCI or MSR variant depending
 * on the box flavor.  NOTE(review): the dispatch condition lines (the
 * test for a PCI box) are elided from this excerpt.
 */
338 unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
341 return uncore_pci_fixed_ctl(box);
343 return uncore_msr_fixed_ctl(box);
347 unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
350 return uncore_pci_fixed_ctr(box);
352 return uncore_msr_fixed_ctr(box);
356 unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
359 return uncore_pci_event_ctl(box, idx);
361 return uncore_msr_event_ctl(box, idx);
365 unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
368 return uncore_pci_perf_ctr(box, idx);
370 return uncore_msr_perf_ctr(box, idx);
/* Effective bit width of a generic counter on this box type. */
373 static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
375 return box->pmu->type->perf_ctr_bits;
/* Effective bit width of the fixed counter on this box type. */
378 static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
380 return box->pmu->type->fixed_ctr_bits;
/* Number of generic counters this box type provides. */
383 static inline int uncore_num_counters(struct intel_uncore_box *box)
385 return box->pmu->type->num_counters;
/* Box-level disable; optional callback, skipped when the type lacks one. */
388 static inline void uncore_disable_box(struct intel_uncore_box *box)
390 if (box->pmu->type->ops->disable_box)
391 box->pmu->type->ops->disable_box(box);
/* Box-level enable; optional callback, skipped when the type lacks one. */
394 static inline void uncore_enable_box(struct intel_uncore_box *box)
396 if (box->pmu->type->ops->enable_box)
397 box->pmu->type->ops->enable_box(box);
/* Per-event disable/enable/read are mandatory callbacks — no NULL check. */
400 static inline void uncore_disable_event(struct intel_uncore_box *box,
401 struct perf_event *event)
403 box->pmu->type->ops->disable_event(box, event);
406 static inline void uncore_enable_event(struct intel_uncore_box *box,
407 struct perf_event *event)
409 box->pmu->type->ops->enable_event(box, event);
412 static inline u64 uncore_read_counter(struct intel_uncore_box *box,
413 struct perf_event *event)
415 return box->pmu->type->ops->read_counter(box, event);
418 static inline void uncore_box_init(struct intel_uncore_box *box)
420 if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
421 if (box->pmu->type->ops->init_box)
422 box->pmu->type->ops->init_box(box);