netfilter: remove unnecessary goto statement for error recovery
[cascardo/linux.git] / arch / x86 / kernel / cpu / perf_event_intel_uncore.h
1 #include <linux/module.h>
2 #include <linux/slab.h>
3 #include <linux/pci.h>
4 #include <linux/perf_event.h>
5 #include "perf_event.h"
6
#define UNCORE_PMU_NAME_LEN             32
#define UNCORE_BOX_HASH_SIZE            8

/* Periodic hrtimer interval (60 s); NOTE(review): presumably used to fold
 * counters before they wrap — confirm against the .c implementation. */
#define UNCORE_PMU_HRTIMER_INTERVAL     (60 * NSEC_PER_SEC)

/* Pseudo event code used to request the fixed counter. */
#define UNCORE_FIXED_EVENT              0xff
#define UNCORE_PMC_IDX_MAX_GENERIC      8
/* The single fixed counter sits just past the generic counters. */
#define UNCORE_PMC_IDX_FIXED            UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX              (UNCORE_PMC_IDX_FIXED + 1)

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
#define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
#define SNB_UNC_CTL_EN                          (1 << 22)
#define SNB_UNC_CTL_INVERT                      (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK                  0x1f000000
/* NHM has a wider counter-mask field than SNB (bits 31:24 vs 28:24). */
#define NHM_UNC_CTL_CMASK_MASK                  0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 NHM_UNC_CTL_CMASK_MASK)
40
/* SNB global control register (MSR addresses) */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register; per-box MSRs are spaced SNB_UNC_CBO_MSR_OFFSET apart */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0
67
/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
/* Box init value: reset control/counters, allow freezing. */
#define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS | \
                                         SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
#define SNBEP_PMON_CTL_RST              (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)       /* only for QPI */
#define SNBEP_PMON_CTL_EN               (1 << 22)
#define SNBEP_PMON_CTL_INVERT           (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control; the Ubox threshold field is only 5 bits wide */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
/*
 * Bit 31: use an unsigned literal — (1 << 31) shifts into the sign bit of
 * int, which is undefined behavior in C, and the negative int value would
 * sign-extend if ever widened to a 64-bit type.
 */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1U << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
117
/* SNB-EP pci control register (offsets in PCI config space) */
#define SNBEP_PCI_PMON_BOX_CTL                  0xf4
#define SNBEP_PCI_PMON_CTL0                     0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0                     0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c

/* SNB-EP Ubox register (MSR addresses) */
#define SNBEP_U_MSR_PMON_CTR0                   0xc16
#define SNBEP_U_MSR_PMON_CTL0                   0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0                  0xd16
#define SNBEP_C0_MSR_PMON_CTL0                  0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK      0xfffffc1f
#define SNBEP_CBO_MSR_OFFSET                    0x20

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd

/* forward declarations */
struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
165
/*
 * Description of one uncore PMU type (e.g. a Cbo, PCU or Ubox family):
 * counter geometry, register layout and the ops used to drive it.
 */
struct intel_uncore_type {
        const char *name;
        int num_counters;       /* generic counters per box */
        int num_boxes;          /* boxes of this type per package */
        int perf_ctr_bits;      /* width of a generic counter */
        int fixed_ctr_bits;     /* width of the fixed counter (0 if none) */
        unsigned perf_ctr;      /* base address of the first counter */
        unsigned event_ctl;     /* base address of the first event-select reg */
        unsigned event_mask;    /* valid bits in the event-control register */
        unsigned fixed_ctr;
        unsigned fixed_ctl;
        unsigned box_ctl;       /* box-level control register (0 if absent) */
        unsigned msr_offset;    /* MSR stride between boxes of this type */
        unsigned num_shared_regs:8;
        unsigned single_fixed:1;
        /* NOTE: "unconstrainted" spelling is historical; kept as-is. */
        struct event_constraint unconstrainted;
        struct event_constraint *constraints;
        struct intel_uncore_pmu *pmus;
        struct intel_uncore_ops *ops;
        struct uncore_event_desc *event_descs;
        const struct attribute_group *attr_groups[3];
};

/* attr_groups[0] holds the "format" sysfs group */
#define format_group attr_groups[0]
190
/*
 * Access-method callbacks for a PMU type; optional entries (init_box,
 * disable_box, enable_box, get/put_constraint) may be NULL — see the
 * inline wrappers below, which NULL-check exactly those.
 */
struct intel_uncore_ops {
        void (*init_box)(struct intel_uncore_box *);
        void (*disable_box)(struct intel_uncore_box *);
        void (*enable_box)(struct intel_uncore_box *);
        void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
        void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
        u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
        int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
        struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
                                                   struct perf_event *);
        void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};
203
/* One registered perf PMU instance of an uncore type. */
struct intel_uncore_pmu {
        struct pmu pmu;
        char name[UNCORE_PMU_NAME_LEN];
        int pmu_idx;            /* index of this box within its type */
        int func_id;
        struct intel_uncore_type *type;
        struct intel_uncore_box ** __percpu box;        /* per-cpu box pointer */
        struct list_head box_list;
};
213
/* A shared (cross-event) extra register with its own lock and refcount. */
struct intel_uncore_extra_reg {
        raw_spinlock_t lock;    /* protects config1 */
        u64 config1;
        atomic_t ref;           /* number of events using this register */
};
219
/*
 * Runtime state for one uncore box instance. A box is accessed either
 * through a PCI device (pci_dev != NULL) or via MSRs — the uncore_*()
 * helpers below dispatch on pci_dev.
 */
struct intel_uncore_box {
        int phys_id;
        int n_active;   /* number of active events */
        int n_events;
        int cpu;        /* cpu to collect events */
        unsigned long flags;
        atomic_t refcnt;
        struct perf_event *events[UNCORE_PMC_IDX_MAX];  /* indexed by counter */
        struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
        unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
        u64 tags[UNCORE_PMC_IDX_MAX];
        struct pci_dev *pci_dev;        /* NULL for MSR-based boxes */
        struct intel_uncore_pmu *pmu;
        struct hrtimer hrtimer;         /* periodic counter folding timer */
        struct list_head list;
        /* trailing variable-length array, sized by type->num_shared_regs */
        struct intel_uncore_extra_reg shared_regs[0];
};

/* bit in intel_uncore_box.flags; set once init_box has run */
#define UNCORE_BOX_FLAG_INITIATED       0
239
/* A named event exported through sysfs; config is the event string shown. */
struct uncore_event_desc {
        struct kobj_attribute attr;
        const char *config;
};

/* Build a uncore_event_desc whose show method is uncore_event_show(). */
#define INTEL_UNCORE_EVENT_DESC(_name, _config)                 \
{                                                               \
        .attr   = __ATTR(_name, 0444, uncore_event_show, NULL), \
        .config = _config,                                      \
}

/* Define a read-only sysfs format attribute that prints a fixed string. */
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                 \
static ssize_t __uncore_##_var##_show(struct kobject *kobj,             \
                                struct kobj_attribute *attr,            \
                                char *page)                             \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
static struct kobj_attribute format_attr_##_var =                       \
        __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
261
262
263 static ssize_t uncore_event_show(struct kobject *kobj,
264                                 struct kobj_attribute *attr, char *buf)
265 {
266         struct uncore_event_desc *event =
267                 container_of(attr, struct uncore_event_desc, attr);
268         return sprintf(buf, "%s", event->config);
269 }
270
271 static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
272 {
273         return box->pmu->type->box_ctl;
274 }
275
276 static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
277 {
278         return box->pmu->type->fixed_ctl;
279 }
280
281 static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
282 {
283         return box->pmu->type->fixed_ctr;
284 }
285
286 static inline
287 unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
288 {
289         return idx * 4 + box->pmu->type->event_ctl;
290 }
291
292 static inline
293 unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
294 {
295         return idx * 8 + box->pmu->type->perf_ctr;
296 }
297
298 static inline
299 unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
300 {
301         if (!box->pmu->type->box_ctl)
302                 return 0;
303         return box->pmu->type->box_ctl +
304                 box->pmu->type->msr_offset * box->pmu->pmu_idx;
305 }
306
307 static inline
308 unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
309 {
310         if (!box->pmu->type->fixed_ctl)
311                 return 0;
312         return box->pmu->type->fixed_ctl +
313                 box->pmu->type->msr_offset * box->pmu->pmu_idx;
314 }
315
316 static inline
317 unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
318 {
319         return box->pmu->type->fixed_ctr +
320                 box->pmu->type->msr_offset * box->pmu->pmu_idx;
321 }
322
323 static inline
324 unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
325 {
326         return idx + box->pmu->type->event_ctl +
327                 box->pmu->type->msr_offset * box->pmu->pmu_idx;
328 }
329
330 static inline
331 unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
332 {
333         return idx + box->pmu->type->perf_ctr +
334                 box->pmu->type->msr_offset * box->pmu->pmu_idx;
335 }
336
337 static inline
338 unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
339 {
340         if (box->pci_dev)
341                 return uncore_pci_fixed_ctl(box);
342         else
343                 return uncore_msr_fixed_ctl(box);
344 }
345
346 static inline
347 unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
348 {
349         if (box->pci_dev)
350                 return uncore_pci_fixed_ctr(box);
351         else
352                 return uncore_msr_fixed_ctr(box);
353 }
354
355 static inline
356 unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
357 {
358         if (box->pci_dev)
359                 return uncore_pci_event_ctl(box, idx);
360         else
361                 return uncore_msr_event_ctl(box, idx);
362 }
363
364 static inline
365 unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
366 {
367         if (box->pci_dev)
368                 return uncore_pci_perf_ctr(box, idx);
369         else
370                 return uncore_msr_perf_ctr(box, idx);
371 }
372
373 static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
374 {
375         return box->pmu->type->perf_ctr_bits;
376 }
377
378 static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
379 {
380         return box->pmu->type->fixed_ctr_bits;
381 }
382
383 static inline int uncore_num_counters(struct intel_uncore_box *box)
384 {
385         return box->pmu->type->num_counters;
386 }
387
388 static inline void uncore_disable_box(struct intel_uncore_box *box)
389 {
390         if (box->pmu->type->ops->disable_box)
391                 box->pmu->type->ops->disable_box(box);
392 }
393
394 static inline void uncore_enable_box(struct intel_uncore_box *box)
395 {
396         if (box->pmu->type->ops->enable_box)
397                 box->pmu->type->ops->enable_box(box);
398 }
399
400 static inline void uncore_disable_event(struct intel_uncore_box *box,
401                                 struct perf_event *event)
402 {
403         box->pmu->type->ops->disable_event(box, event);
404 }
405
406 static inline void uncore_enable_event(struct intel_uncore_box *box,
407                                 struct perf_event *event)
408 {
409         box->pmu->type->ops->enable_event(box, event);
410 }
411
412 static inline u64 uncore_read_counter(struct intel_uncore_box *box,
413                                 struct perf_event *event)
414 {
415         return box->pmu->type->ops->read_counter(box, event);
416 }
417
418 static inline void uncore_box_init(struct intel_uncore_box *box)
419 {
420         if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
421                 if (box->pmu->type->ops->init_box)
422                         box->pmu->type->ops->init_box(box);
423         }
424 }