perf/x86/intel: Add Ivy Bridge-EP uncore IRP box support
arch/x86/kernel/cpu/perf_event_intel_uncore.c
#include "perf_event_intel_uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
static struct intel_uncore_type **msr_uncores = empty_uncore;
static struct intel_uncore_type **pci_uncores = empty_uncore;
/* pci bus to socket mapping */
static int pcibus_to_physid[256] = { [0 ... 255] = -1, };

static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];

static DEFINE_RAW_SPINLOCK(uncore_box_lock);

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint constraint_fixed =
        EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
static struct event_constraint constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))

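/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit wide field of x, e.g.
 * __BITS_VALUE(ref, 2, 6) returns bits 12-17 of ref.  The constraint code
 * below uses it to unpack per-field reference counts that are packed into
 * a single atomic_t.
 */
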
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 count;

        rdmsrl(event->hw.event_base, count);

        return count;
}

/*
 * Generic get constraint function for shared match/mask registers.
 * Returning NULL means the event may be scheduled; returning
 * &constraint_empty (an all-zero counter mask) rejects it because the
 * shared register is already claimed with a conflicting configuration.
 */
static struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
        unsigned long flags;
        bool ok = false;

        /*
         * reg->alloc can be set due to existing state, so for fake box we
         * need to ignore this, otherwise we might fail to allocate proper
         * fake state for this extra reg constraint.
         */
        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;

        er = &box->shared_regs[reg1->idx];
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!atomic_read(&er->ref) ||
            (er->config1 == reg1->config && er->config2 == reg2->config)) {
                atomic_inc(&er->ref);
                er->config1 = reg1->config;
                er->config2 = reg2->config;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (ok) {
                if (!uncore_box_is_fake(box))
                        reg1->alloc = 1;
                return NULL;
        }

        return &constraint_empty;
}

static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

        /*
         * Only put constraint if extra reg was actually allocated. Also
         * takes care of events which do not use an extra shared reg.
         *
         * Also, if this is a fake box we shouldn't touch any event state
         * (reg->alloc) and we don't care about leaving inconsistent box
         * state either since it will be thrown out.
         */
        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        er = &box->shared_regs[reg1->idx];
        atomic_dec(&er->ref);
        reg1->alloc = 0;
}

static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
        struct intel_uncore_extra_reg *er;
        unsigned long flags;
        u64 config;

        er = &box->shared_regs[idx];

        raw_spin_lock_irqsave(&er->lock, flags);
        config = er->config;
        raw_spin_unlock_irqrestore(&er->lock, flags);

        return config;
}

/* Sandy Bridge-EP uncore support */
static struct intel_uncore_type snbep_uncore_cbox;
static struct intel_uncore_type snbep_uncore_pcu;

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config = 0;

        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                pci_write_config_dword(pdev, box_ctl, config);
        }
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config = 0;

        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                pci_write_config_dword(pdev, box_ctl, config);
        }
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        u64 count = 0;

        pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
        pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

        return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;

        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);

        if (msr)
                wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_nid.attr,
        &format_attr_filter_state.attr,
        &format_attr_filter_opc.attr,
        NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        { /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
        INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
        { /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
        .name = "format",
        .attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
        .name = "format",
        .attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
        .name = "format",
        .attrs = snbep_uncore_qpi_formats_attr,
};

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
        .init_box       = snbep_uncore_msr_init_box,            \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter

static struct intel_uncore_ops snbep_uncore_msr_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
        .init_box       = snbep_uncore_pci_init_box,            \
        .disable_box    = snbep_uncore_pci_disable_box,         \
        .enable_box     = snbep_uncore_pci_enable_box,          \
        .disable_event  = snbep_uncore_pci_disable_event,       \
        .read_counter   = snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
        .enable_event   = snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
        EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &snbep_uncore_msr_ops,
        .format_group   = &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
        EVENT_EXTRA_END
};

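/*
 * The Cbox filter register is shared by all counters in the box.  Each of
 * up to five filter fields gets its own 6-bit reference count packed into
 * er->ref, so different events can claim disjoint fields concurrently, or
 * share a field when their filter contents agree.
 */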
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i;

        if (uncore_box_is_fake(box))
                return;

        for (i = 0; i < 5; i++) {
                if (reg1->alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
                            u64 (*cbox_filter_mask)(int fields))
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i, alloc = 0;
        unsigned long flags;
        u64 mask;

        if (reg1->idx == EXTRA_REG_NONE)
                return NULL;

        raw_spin_lock_irqsave(&er->lock, flags);
        for (i = 0; i < 5; i++) {
                if (!(reg1->idx & (0x1 << i)))
                        continue;
                if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
                        continue;

                mask = cbox_filter_mask(0x1 << i);
                if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
                    !((reg1->config ^ er->config) & mask)) {
                        atomic_add(1 << (i * 6), &er->ref);
                        er->config &= ~mask;
                        er->config |= reg1->config & mask;
                        alloc |= (0x1 << i);
                } else {
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);
        if (i < 5)
                goto fail;

        if (!uncore_box_is_fake(box))
                reg1->alloc |= alloc;

        return NULL;
fail:
        for (; i >= 0; i--) {
                if (alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        return &constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
        u64 mask = 0;

        if (fields & 0x1)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
        if (fields & 0x2)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
        if (fields & 0x4)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
        if (fields & 0x8)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

        return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

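/*
 * Scan the extra-reg table to work out which filter fields the event
 * uses; the matching idx bits are OR-ed together and translated into a
 * filter register mask via snbep_cbox_filter_mask().
 */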
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct extra_reg *er;
        int idx = 0;

        for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
                        continue;
                idx |= er->idx;
        }

        if (idx) {
                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
                reg1->idx = idx;
        }
        return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_cbox_hw_config,
        .get_constraint         = snbep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &snbep_uncore_cbox_ops,
        .format_group           = &snbep_uncore_cbox_format_group,
};

static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        u64 config = reg1->config;

        if (new_idx > reg1->idx)
                config <<= 8 * (new_idx - reg1->idx);
        else
                config >>= 8 * (reg1->idx - new_idx);

        if (modify) {
                hwc->config += new_idx - reg1->idx;
                reg1->config = config;
                reg1->idx = new_idx;
        }
        return config;
}

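/*
 * The PCU filter register packs four 8-bit frequency bands.  A band event
 * may use any of the four slots, so on a conflict the constraint code
 * calls snbep_pcu_alter_er() to shift both the event select and the band
 * threshold to the next slot and retries.
 */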
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        unsigned long flags;
        int idx = reg1->idx;
        u64 mask, config1 = reg1->config;
        bool ok = false;

        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;
again:
        mask = 0xffULL << (idx * 8);
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
            !((config1 ^ er->config) & mask)) {
                atomic_add(1 << (idx * 8), &er->ref);
                er->config &= ~mask;
                er->config |= config1 & mask;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (!ok) {
                idx = (idx + 1) % 4;
                if (idx != reg1->idx) {
                        config1 = snbep_pcu_alter_er(event, idx, false);
                        goto again;
                }
                return &constraint_empty;
        }

        if (!uncore_box_is_fake(box)) {
                if (idx != reg1->idx)
                        snbep_pcu_alter_er(event, idx, true);
                reg1->alloc = 1;
        }
        return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];

        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        atomic_sub(1 << (reg1->idx * 8), &er->ref);
        reg1->alloc = 0;
}

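/*
 * Event selects 0xb-0xe are the four frequency-band events; band N uses
 * byte N of the PCU filter register, hence idx = ev_sel - 0xb.
 */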
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

        if (ev_sel >= 0xb && ev_sel <= 0xe) {
                reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
                reg1->idx = ev_sel - 0xb;
                reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
        }
        return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_pcu_ops,
        .format_group           = &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
        &snbep_uncore_ubox,
        &snbep_uncore_cbox,
        &snbep_uncore_pcu,
        NULL,
};

enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
};

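/*
 * The QPI match/mask registers live not in the PMON device itself but in
 * a companion PCI function, one per port.  Those devices are saved in
 * extra_pci_dev[] at probe time and programmed from the enable path
 * below.
 */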
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
                reg1->idx = 0;
                reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
                reg1->config = event->attr.config1;
                reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
                reg2->config = event->attr.config2;
        }
        return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
                struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
                WARN_ON_ONCE(!filter_pdev);
                if (filter_pdev) {
                        pci_write_config_dword(filter_pdev, reg1->reg,
                                                (u32)reg1->config);
                        pci_write_config_dword(filter_pdev, reg1->reg + 4,
                                                (u32)(reg1->config >> 32));
                        pci_write_config_dword(filter_pdev, reg2->reg,
                                                (u32)reg2->config);
                        pci_write_config_dword(filter_pdev, reg2->reg + 4,
                                                (u32)(reg2->config >> 32));
                }
        }

        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
        .enable_event           = snbep_qpi_enable_event,
        .hw_config              = snbep_qpi_hw_config,
        .get_constraint         = uncore_get_constraint,
        .put_constraint         = uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
        .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
        .ops            = &snbep_uncore_pci_ops,                \
        .format_group   = &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 4,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = snbep_uncore_imc_events,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 2,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_qpi_ops,
        .event_descs            = snbep_uncore_qpi_events,
        .format_group           = &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
        SNBEP_PCI_UNCORE_HA,
        SNBEP_PCI_UNCORE_IMC,
        SNBEP_PCI_UNCORE_QPI,
        SNBEP_PCI_UNCORE_R2PCIE,
        SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
        [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
        [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
        [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
        [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
        [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
        NULL,
};

static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
        { /* Home Agent */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
        },
        { /* MC Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
        },
        { /* MC Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
        },
        { /* MC Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
        },
        { /* MC Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
        },
        { /* QPI Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
        },
        { /* QPI Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
                .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
        .name           = "snbep_uncore",
        .id_table       = snbep_uncore_pci_ids,
};

/*
 * Build the PCI bus to socket mapping.  The config offsets used below
 * (0x40 and 0x54 in the UBOX device) are, per Intel's uncore performance
 * monitoring documentation, the CPUNODEID register (this socket's node
 * id) and the GIDNIDMAP node id mapping register.
 */
static int snbep_pci2phy_map_init(int devid)
{
        struct pci_dev *ubox_dev = NULL;
        int i, bus, nodeid;
        int err = 0;
        u32 config = 0;

        while (1) {
                /* find the UBOX device */
                ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
                if (!ubox_dev)
                        break;
                bus = ubox_dev->bus->number;
                /* get the Node ID of the local register */
                err = pci_read_config_dword(ubox_dev, 0x40, &config);
                if (err)
                        break;
                nodeid = config;
                /* get the Node ID mapping */
                err = pci_read_config_dword(ubox_dev, 0x54, &config);
                if (err)
                        break;
                /*
                 * each three-bit field in the Node ID mapping register
                 * maps to a particular node.
                 */
                for (i = 0; i < 8; i++) {
                        if (nodeid == ((config >> (3 * i)) & 0x7)) {
                                pcibus_to_physid[bus] = i;
                                break;
                        }
                }
        }

        if (!err) {
                /*
                 * For PCI buses with no UBOX device, find the next bus
                 * that has a UBOX device and use its mapping.
                 */
                i = -1;
                for (bus = 255; bus >= 0; bus--) {
                        if (pcibus_to_physid[bus] >= 0)
                                i = pcibus_to_physid[bus];
                        else
                                pcibus_to_physid[bus] = i;
                }
        }

        if (ubox_dev)
                pci_dev_put(ubox_dev);

        return err ? pcibios_err_to_errno(err) : 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);
        if (msr)
                wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
}

static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;

        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
}

#define IVT_UNCORE_MSR_OPS_COMMON_INIT()                        \
        .init_box       = ivt_uncore_msr_init_box,              \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter

static struct intel_uncore_ops ivt_uncore_msr_ops = {
        IVT_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivt_uncore_pci_ops = {
        .init_box       = ivt_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};

#define IVT_UNCORE_PCI_COMMON_INIT()                            \
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
        .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
        .event_mask     = IVT_PMON_RAW_EVENT_MASK,              \
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
        .ops            = &ivt_uncore_pci_ops,                  \
        .format_group   = &ivt_uncore_format_group

static struct attribute *ivt_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute *ivt_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

static struct attribute *ivt_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_link.attr,
        &format_attr_filter_state2.attr,
        &format_attr_filter_nid2.attr,
        &format_attr_filter_opc2.attr,
        NULL,
};

static struct attribute *ivt_uncore_pcu_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute *ivt_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};

static struct attribute_group ivt_uncore_format_group = {
        .name = "format",
        .attrs = ivt_uncore_formats_attr,
};

static struct attribute_group ivt_uncore_ubox_format_group = {
        .name = "format",
        .attrs = ivt_uncore_ubox_formats_attr,
};

static struct attribute_group ivt_uncore_cbox_format_group = {
        .name = "format",
        .attrs = ivt_uncore_cbox_formats_attr,
};

static struct attribute_group ivt_uncore_pcu_format_group = {
        .name = "format",
        .attrs = ivt_uncore_pcu_formats_attr,
};

static struct attribute_group ivt_uncore_qpi_format_group = {
        .name = "format",
        .attrs = ivt_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivt_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = IVT_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &ivt_uncore_msr_ops,
        .format_group   = &ivt_uncore_ubox_format_group,
};

static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
        EVENT_EXTRA_END
};

static u64 ivt_cbox_filter_mask(int fields)
{
        u64 mask = 0;

        if (fields & 0x1)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
        if (fields & 0x2)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
        if (fields & 0x4)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
        if (fields & 0x8)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
        if (fields & 0x10)
                mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;

        return mask;
}

static struct event_constraint *
ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
}

static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct extra_reg *er;
        int idx = 0;

        for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
                        continue;
                idx |= er->idx;
        }

        if (idx) {
                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
                reg1->idx = idx;
        }
        return 0;
}

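/*
 * The IVT Cbox filter no longer fits in a single MSR: the low 32 bits are
 * written to the filter register itself and the high 32 bits to a second
 * filter register that sits at reg1->reg + 6.
 */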
static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                u64 filter = uncore_shared_reg_config(box, 0);
                wrmsrl(reg1->reg, filter & 0xffffffff);
                wrmsrl(reg1->reg + 6, filter >> 32);
        }

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivt_uncore_cbox_ops = {
        .init_box               = ivt_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = ivt_cbox_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = ivt_cbox_hw_config,
        .get_constraint         = ivt_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivt_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 15,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &ivt_uncore_cbox_ops,
        .format_group           = &ivt_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivt_uncore_pcu_ops = {
        IVT_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivt_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &ivt_uncore_pcu_ops,
        .format_group           = &ivt_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivt_msr_uncores[] = {
        &ivt_uncore_ubox,
        &ivt_uncore_cbox,
        &ivt_uncore_pcu,
        NULL,
};

static struct intel_uncore_type ivt_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        IVT_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};

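/*
 * Since the IRP counter/control registers are not laid out at a fixed
 * stride, per-counter offset tables are used here instead of the usual
 * event_ctl/perf_ctr base-plus-index arithmetic.
 */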
1351 static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1352 {
1353         struct pci_dev *pdev = box->pci_dev;
1354         struct hw_perf_event *hwc = &event->hw;
1355
1356         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
1357                                hwc->config | SNBEP_PMON_CTL_EN);
1358 }
1359
1360 static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1361 {
1362         struct pci_dev *pdev = box->pci_dev;
1363         struct hw_perf_event *hwc = &event->hw;
1364
1365         pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
1366 }
1367
1368 static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1369 {
1370         struct pci_dev *pdev = box->pci_dev;
1371         struct hw_perf_event *hwc = &event->hw;
1372         u64 count = 0;
1373
1374         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1375         pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1376
1377         return count;
1378 }
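
/*
 * A minimal equivalent sketch of the read above (editor's addition)
 * that avoids the pointer cast; it relies on x86 being little-endian,
 * exactly as the cast does:
 *
 *	u32 lo, hi;
 *
 *	pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], &lo);
 *	pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, &hi);
 *	return ((u64)hi << 32) | lo;
 */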
1379
1380 static struct intel_uncore_ops ivt_uncore_irp_ops = {
1381         .init_box       = ivt_uncore_pci_init_box,
1382         .disable_box    = snbep_uncore_pci_disable_box,
1383         .enable_box     = snbep_uncore_pci_enable_box,
1384         .disable_event  = ivt_uncore_irp_disable_event,
1385         .enable_event   = ivt_uncore_irp_enable_event,
1386         .read_counter   = ivt_uncore_irp_read_counter,
1387 };
1388
1389 static struct intel_uncore_type ivt_uncore_irp = {
1390         .name                   = "irp",
1391         .num_counters           = 4,
1392         .num_boxes              = 1,
1393         .perf_ctr_bits          = 48,
1394         .event_mask             = IVT_PMON_RAW_EVENT_MASK,
1395         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1396         .ops                    = &ivt_uncore_irp_ops,
1397         .format_group           = &ivt_uncore_format_group,
1398 };
1399
1400 static struct intel_uncore_ops ivt_uncore_qpi_ops = {
1401         .init_box       = ivt_uncore_pci_init_box,
1402         .disable_box    = snbep_uncore_pci_disable_box,
1403         .enable_box     = snbep_uncore_pci_enable_box,
1404         .disable_event  = snbep_uncore_pci_disable_event,
1405         .enable_event   = snbep_qpi_enable_event,
1406         .read_counter   = snbep_uncore_pci_read_counter,
1407         .hw_config      = snbep_qpi_hw_config,
1408         .get_constraint = uncore_get_constraint,
1409         .put_constraint = uncore_put_constraint,
1410 };
1411
1412 static struct intel_uncore_type ivt_uncore_qpi = {
1413         .name                   = "qpi",
1414         .num_counters           = 4,
1415         .num_boxes              = 3,
1416         .perf_ctr_bits          = 48,
1417         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
1418         .event_ctl              = SNBEP_PCI_PMON_CTL0,
1419         .event_mask             = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1420         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1421         .num_shared_regs        = 1,
1422         .ops                    = &ivt_uncore_qpi_ops,
1423         .format_group           = &ivt_uncore_qpi_format_group,
1424 };
1425
1426 static struct intel_uncore_type ivt_uncore_r2pcie = {
1427         .name           = "r2pcie",
1428         .num_counters   = 4,
1429         .num_boxes      = 1,
1430         .perf_ctr_bits  = 44,
1431         .constraints    = snbep_uncore_r2pcie_constraints,
1432         IVT_UNCORE_PCI_COMMON_INIT(),
1433 };
1434
1435 static struct intel_uncore_type ivt_uncore_r3qpi = {
1436         .name           = "r3qpi",
1437         .num_counters   = 3,
1438         .num_boxes      = 2,
1439         .perf_ctr_bits  = 44,
1440         .constraints    = snbep_uncore_r3qpi_constraints,
1441         IVT_UNCORE_PCI_COMMON_INIT(),
1442 };
1443
1444 enum {
1445         IVT_PCI_UNCORE_HA,
1446         IVT_PCI_UNCORE_IMC,
1447         IVT_PCI_UNCORE_IRP,
1448         IVT_PCI_UNCORE_QPI,
1449         IVT_PCI_UNCORE_R2PCIE,
1450         IVT_PCI_UNCORE_R3QPI,
1451 };
1452
1453 static struct intel_uncore_type *ivt_pci_uncores[] = {
1454         [IVT_PCI_UNCORE_HA]     = &ivt_uncore_ha,
1455         [IVT_PCI_UNCORE_IMC]    = &ivt_uncore_imc,
1456         [IVT_PCI_UNCORE_IRP]    = &ivt_uncore_irp,
1457         [IVT_PCI_UNCORE_QPI]    = &ivt_uncore_qpi,
1458         [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1459         [IVT_PCI_UNCORE_R3QPI]  = &ivt_uncore_r3qpi,
1460         NULL,
1461 };
1462
1463 static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1464         { /* Home Agent 0 */
1465                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1466                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1467         },
1468         { /* Home Agent 1 */
1469                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1470                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1471         },
1472         { /* MC0 Channel 0 */
1473                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1474                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1475         },
1476         { /* MC0 Channel 1 */
1477                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1478                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1479         },
1480         { /* MC0 Channel 3 */
1481                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1482                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1483         },
1484         { /* MC0 Channel 4 */
1485                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1486                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1487         },
1488         { /* MC1 Channel 0 */
1489                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1490                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1491         },
1492         { /* MC1 Channel 1 */
1493                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1494                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1495         },
1496         { /* MC1 Channel 3 */
1497                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1498                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1499         },
1500         { /* MC1 Channel 4 */
1501                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1502                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1503         },
1504         { /* IRP */
1505                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1506                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
1507         },
1508         { /* QPI0 Port 0 */
1509                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1510                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1511         },
1512         { /* QPI0 Port 1 */
1513                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1514                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1515         },
1516         { /* QPI1 Port 2 */
1517                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1518                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1519         },
1520         { /* R2PCIe */
1521                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1522                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1523         },
1524         { /* R3QPI0 Link 0 */
1525                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1526                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1527         },
1528         { /* R3QPI0 Link 1 */
1529                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1530                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1531         },
1532         { /* R3QPI1 Link 2 */
1533                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1534                 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1535         },
1536         { /* QPI Port 0 filter  */
1537                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1538                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1539                                                    SNBEP_PCI_QPI_PORT0_FILTER),
1540         },
1541         { /* QPI Port 1 filter  */

1542                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1543                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1544                                                    SNBEP_PCI_QPI_PORT1_FILTER),
1545         },
1546         { /* end: all zeroes */ }
1547 };
1548
1549 static struct pci_driver ivt_uncore_pci_driver = {
1550         .name           = "ivt_uncore",
1551         .id_table       = ivt_uncore_pci_ids,
1552 };
1553 /* end of IvyTown uncore support */
1554
1555 /* Sandy Bridge uncore support */
1556 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1557 {
1558         struct hw_perf_event *hwc = &event->hw;
1559
1560         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1561                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1562         else
1563                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
1564 }
1565
1566 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1567 {
1568         wrmsrl(event->hw.config_base, 0);
1569 }
1570
1571 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
1572 {
1573         if (box->pmu->pmu_idx == 0) {
1574                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
1575                         SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
1576         }
1577 }
1578
1579 static struct uncore_event_desc snb_uncore_events[] = {
1580         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
1581         { /* end: all zeroes */ },
1582 };
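
/*
 * Usage sketch (editor's addition; assumes the usual uncore_<box>_<idx>
 * PMU naming done by the generic registration code): event descriptors
 * like the one above become sysfs aliases, so the fixed clockticks
 * event on the first C-Box can be counted with e.g.:
 *
 *	perf stat -a -e uncore_cbox_0/clockticks/ sleep 1
 */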
1583
1584 static struct attribute *snb_uncore_formats_attr[] = {
1585         &format_attr_event.attr,
1586         &format_attr_umask.attr,
1587         &format_attr_edge.attr,
1588         &format_attr_inv.attr,
1589         &format_attr_cmask5.attr,
1590         NULL,
1591 };
1592
1593 static struct attribute_group snb_uncore_format_group = {
1594         .name           = "format",
1595         .attrs          = snb_uncore_formats_attr,
1596 };
1597
1598 static struct intel_uncore_ops snb_uncore_msr_ops = {
1599         .init_box       = snb_uncore_msr_init_box,
1600         .disable_event  = snb_uncore_msr_disable_event,
1601         .enable_event   = snb_uncore_msr_enable_event,
1602         .read_counter   = uncore_msr_read_counter,
1603 };
1604
1605 static struct event_constraint snb_uncore_cbox_constraints[] = {
1606         UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
1607         UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
1608         EVENT_CONSTRAINT_END
1609 };
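
/*
 * Illustrative note (editor's addition): UNCORE_EVENT_CONSTRAINT(event,
 * mask) pins an event code to the counters named in the bitmask, so the
 * two entries above restrict events 0x80 and 0x83 to counter 0
 * (mask 0x1); all other events may use either of the two general
 * counters.
 */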
1610
1611 static struct intel_uncore_type snb_uncore_cbox = {
1612         .name           = "cbox",
1613         .num_counters   = 2,
1614         .num_boxes      = 4,
1615         .perf_ctr_bits  = 44,
1616         .fixed_ctr_bits = 48,
1617         .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
1618         .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
1619         .fixed_ctr      = SNB_UNC_FIXED_CTR,
1620         .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
1621         .single_fixed   = 1,
1622         .event_mask     = SNB_UNC_RAW_EVENT_MASK,
1623         .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
1624         .constraints    = snb_uncore_cbox_constraints,
1625         .ops            = &snb_uncore_msr_ops,
1626         .format_group   = &snb_uncore_format_group,
1627         .event_descs    = snb_uncore_events,
1628 };
1629
1630 static struct intel_uncore_type *snb_msr_uncores[] = {
1631         &snb_uncore_cbox,
1632         NULL,
1633 };
1634 /* end of Sandy Bridge uncore support */
1635
1636 /* Nehalem uncore support */
1637 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
1638 {
1639         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
1640 }
1641
1642 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
1643 {
1644         wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
1645 }
1646
1647 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1648 {
1649         struct hw_perf_event *hwc = &event->hw;
1650
1651         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1652                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1653         else
1654                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
1655 }
1656
1657 static struct attribute *nhm_uncore_formats_attr[] = {
1658         &format_attr_event.attr,
1659         &format_attr_umask.attr,
1660         &format_attr_edge.attr,
1661         &format_attr_inv.attr,
1662         &format_attr_cmask8.attr,
1663         NULL,
1664 };
1665
1666 static struct attribute_group nhm_uncore_format_group = {
1667         .name = "format",
1668         .attrs = nhm_uncore_formats_attr,
1669 };
1670
1671 static struct uncore_event_desc nhm_uncore_events[] = {
1672         INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
1673         INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
1674         INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
1675         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
1676         INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
1677         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
1678         INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
1679         INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
1680         INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
1681         { /* end: all zeroes */ },
1682 };
1683
1684 static struct intel_uncore_ops nhm_uncore_msr_ops = {
1685         .disable_box    = nhm_uncore_msr_disable_box,
1686         .enable_box     = nhm_uncore_msr_enable_box,
1687         .disable_event  = snb_uncore_msr_disable_event,
1688         .enable_event   = nhm_uncore_msr_enable_event,
1689         .read_counter   = uncore_msr_read_counter,
1690 };
1691
1692 static struct intel_uncore_type nhm_uncore = {
1693         .name           = "",
1694         .num_counters   = 8,
1695         .num_boxes      = 1,
1696         .perf_ctr_bits  = 48,
1697         .fixed_ctr_bits = 48,
1698         .event_ctl      = NHM_UNC_PERFEVTSEL0,
1699         .perf_ctr       = NHM_UNC_UNCORE_PMC0,
1700         .fixed_ctr      = NHM_UNC_FIXED_CTR,
1701         .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
1702         .event_mask     = NHM_UNC_RAW_EVENT_MASK,
1703         .event_descs    = nhm_uncore_events,
1704         .ops            = &nhm_uncore_msr_ops,
1705         .format_group   = &nhm_uncore_format_group,
1706 };
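
/*
 * Editor's note (an assumption about the generic registration code, not
 * part of this file): the empty .name makes this single Nehalem box
 * show up as plain "uncore" in sysfs, so its events read e.g.:
 *
 *	perf stat -a -e uncore/qmc_writes_full_any/ sleep 1
 */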
1707
1708 static struct intel_uncore_type *nhm_msr_uncores[] = {
1709         &nhm_uncore,
1710         NULL,
1711 };
1712 /* end of Nehalem uncore support */
1713
1714 /* Nehalem-EX uncore support */
1715 DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
1716 DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
1717 DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
1718 DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
1719
1720 static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
1721 {
1722         wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
1723 }
1724
1725 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
1726 {
1727         unsigned msr = uncore_msr_box_ctl(box);
1728         u64 config;
1729
1730         if (msr) {
1731                 rdmsrl(msr, config);
1732                 config &= ~((1ULL << uncore_num_counters(box)) - 1);
1733                 /* WBox has a fixed counter */
1734                 if (uncore_msr_fixed_ctl(box))
1735                         config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
1736                 wrmsrl(msr, config);
1737         }
1738 }
1739
1740 static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
1741 {
1742         unsigned msr = uncore_msr_box_ctl(box);
1743         u64 config;
1744
1745         if (msr) {
1746                 rdmsrl(msr, config);
1747                 config |= (1ULL << uncore_num_counters(box)) - 1;
1748                 /* WBox has a fixed counter */
1749                 if (uncore_msr_fixed_ctl(box))
1750                         config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
1751                 wrmsrl(msr, config);
1752         }
1753 }
1754
1755 static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1756 {
1757         wrmsrl(event->hw.config_base, 0);
1758 }
1759
1760 static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1761 {
1762         struct hw_perf_event *hwc = &event->hw;
1763
1764         if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
1765                 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
1766         else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
1767                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1768         else
1769                 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1770 }
1771
1772 #define NHMEX_UNCORE_OPS_COMMON_INIT()                          \
1773         .init_box       = nhmex_uncore_msr_init_box,            \
1774         .disable_box    = nhmex_uncore_msr_disable_box,         \
1775         .enable_box     = nhmex_uncore_msr_enable_box,          \
1776         .disable_event  = nhmex_uncore_msr_disable_event,       \
1777         .read_counter   = uncore_msr_read_counter
1778
1779 static struct intel_uncore_ops nhmex_uncore_ops = {
1780         NHMEX_UNCORE_OPS_COMMON_INIT(),
1781         .enable_event   = nhmex_uncore_msr_enable_event,
1782 };
1783
1784 static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
1785         &format_attr_event.attr,
1786         &format_attr_edge.attr,
1787         NULL,
1788 };
1789
1790 static struct attribute_group nhmex_uncore_ubox_format_group = {
1791         .name           = "format",
1792         .attrs          = nhmex_uncore_ubox_formats_attr,
1793 };
1794
1795 static struct intel_uncore_type nhmex_uncore_ubox = {
1796         .name           = "ubox",
1797         .num_counters   = 1,
1798         .num_boxes      = 1,
1799         .perf_ctr_bits  = 48,
1800         .event_ctl      = NHMEX_U_MSR_PMON_EV_SEL,
1801         .perf_ctr       = NHMEX_U_MSR_PMON_CTR,
1802         .event_mask     = NHMEX_U_PMON_RAW_EVENT_MASK,
1803         .box_ctl        = NHMEX_U_MSR_PMON_GLOBAL_CTL,
1804         .ops            = &nhmex_uncore_ops,
1805         .format_group   = &nhmex_uncore_ubox_format_group
1806 };
1807
1808 static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
1809         &format_attr_event.attr,
1810         &format_attr_umask.attr,
1811         &format_attr_edge.attr,
1812         &format_attr_inv.attr,
1813         &format_attr_thresh8.attr,
1814         NULL,
1815 };
1816
1817 static struct attribute_group nhmex_uncore_cbox_format_group = {
1818         .name = "format",
1819         .attrs = nhmex_uncore_cbox_formats_attr,
1820 };
1821
1822 /* msr offset for each instance of cbox */
1823 static unsigned nhmex_cbox_msr_offsets[] = {
1824         0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
1825 };
1826
1827 static struct intel_uncore_type nhmex_uncore_cbox = {
1828         .name                   = "cbox",
1829         .num_counters           = 6,
1830         .num_boxes              = 10,
1831         .perf_ctr_bits          = 48,
1832         .event_ctl              = NHMEX_C0_MSR_PMON_EV_SEL0,
1833         .perf_ctr               = NHMEX_C0_MSR_PMON_CTR0,
1834         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
1835         .box_ctl                = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
1836         .msr_offsets            = nhmex_cbox_msr_offsets,
1837         .pair_ctr_ctl           = 1,
1838         .ops                    = &nhmex_uncore_ops,
1839         .format_group           = &nhmex_uncore_cbox_format_group
1840 };
1841
1842 static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
1843         INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
1844         { /* end: all zeroes */ },
1845 };
1846
1847 static struct intel_uncore_type nhmex_uncore_wbox = {
1848         .name                   = "wbox",
1849         .num_counters           = 4,
1850         .num_boxes              = 1,
1851         .perf_ctr_bits          = 48,
1852         .event_ctl              = NHMEX_W_MSR_PMON_CNT0,
1853         .perf_ctr               = NHMEX_W_MSR_PMON_EVT_SEL0,
1854         .fixed_ctr              = NHMEX_W_MSR_PMON_FIXED_CTR,
1855         .fixed_ctl              = NHMEX_W_MSR_PMON_FIXED_CTL,
1856         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
1857         .box_ctl                = NHMEX_W_MSR_GLOBAL_CTL,
1858         .pair_ctr_ctl           = 1,
1859         .event_descs            = nhmex_uncore_wbox_events,
1860         .ops                    = &nhmex_uncore_ops,
1861         .format_group           = &nhmex_uncore_cbox_format_group
1862 };
1863
1864 static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1865 {
1866         struct hw_perf_event *hwc = &event->hw;
1867         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1868         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1869         int ctr, ev_sel;
1870
1871         ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
1872                 NHMEX_B_PMON_CTR_SHIFT;
1873         ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
1874                   NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
1875
1876         /* events that do not use the match/mask registers */
1877         if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
1878             (ctr == 2 && ev_sel != 0x4) || ctr == 3)
1879                 return 0;
1880
1881         if (box->pmu->pmu_idx == 0)
1882                 reg1->reg = NHMEX_B0_MSR_MATCH;
1883         else
1884                 reg1->reg = NHMEX_B1_MSR_MATCH;
1885         reg1->idx = 0;
1886         reg1->config = event->attr.config1;
1887         reg2->config = event->attr.config2;
1888         return 0;
1889 }
1890
1891 static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1892 {
1893         struct hw_perf_event *hwc = &event->hw;
1894         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1895         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1896
1897         if (reg1->idx != EXTRA_REG_NONE) {
1898                 wrmsrl(reg1->reg, reg1->config);
1899                 wrmsrl(reg1->reg + 1, reg2->config);
1900         }
1901         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1902                 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
1903 }
1904
1905 /*
1906  * The Bbox has 4 counters, but each counter monitors a different set
1907  * of events. Bits 6-7 in the event config select the counter to use.
1908  */
1909 static struct event_constraint nhmex_uncore_bbox_constraints[] = {
1910         EVENT_CONSTRAINT(0x00, 1, 0xc0),
1911         EVENT_CONSTRAINT(0x40, 2, 0xc0),
1912         EVENT_CONSTRAINT(0x80, 4, 0xc0),
1913         EVENT_CONSTRAINT(0xc0, 8, 0xc0),
1914         EVENT_CONSTRAINT_END,
1915 };
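
/*
 * Worked example (editor's addition): an event encoded with counter=2
 * has bits 6-7 equal to 10b, i.e. config & 0xc0 == 0x80, so the third
 * constraint above matches and its idxmsk of 4 pins the event to
 * counter 2.
 */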
1916
1917 static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1918         &format_attr_event5.attr,
1919         &format_attr_counter.attr,
1920         &format_attr_match.attr,
1921         &format_attr_mask.attr,
1922         NULL,
1923 };
1924
1925 static struct attribute_group nhmex_uncore_bbox_format_group = {
1926         .name = "format",
1927         .attrs = nhmex_uncore_bbox_formats_attr,
1928 };
1929
1930 static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1931         NHMEX_UNCORE_OPS_COMMON_INIT(),
1932         .enable_event           = nhmex_bbox_msr_enable_event,
1933         .hw_config              = nhmex_bbox_hw_config,
1934         .get_constraint         = uncore_get_constraint,
1935         .put_constraint         = uncore_put_constraint,
1936 };
1937
1938 static struct intel_uncore_type nhmex_uncore_bbox = {
1939         .name                   = "bbox",
1940         .num_counters           = 4,
1941         .num_boxes              = 2,
1942         .perf_ctr_bits          = 48,
1943         .event_ctl              = NHMEX_B0_MSR_PMON_CTL0,
1944         .perf_ctr               = NHMEX_B0_MSR_PMON_CTR0,
1945         .event_mask             = NHMEX_B_PMON_RAW_EVENT_MASK,
1946         .box_ctl                = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1947         .msr_offset             = NHMEX_B_MSR_OFFSET,
1948         .pair_ctr_ctl           = 1,
1949         .num_shared_regs        = 1,
1950         .constraints            = nhmex_uncore_bbox_constraints,
1951         .ops                    = &nhmex_uncore_bbox_ops,
1952         .format_group           = &nhmex_uncore_bbox_format_group
1953 };
1954
1955 static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1956 {
1957         struct hw_perf_event *hwc = &event->hw;
1958         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1959         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1960
1961         /* only TO_R_PROG_EV event uses the match/mask register */
1962         if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1963             NHMEX_S_EVENT_TO_R_PROG_EV)
1964                 return 0;
1965
1966         if (box->pmu->pmu_idx == 0)
1967                 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1968         else
1969                 reg1->reg = NHMEX_S1_MSR_MM_CFG;
1970         reg1->idx = 0;
1971         reg1->config = event->attr.config1;
1972         reg2->config = event->attr.config2;
1973         return 0;
1974 }
1975
1976 static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1977 {
1978         struct hw_perf_event *hwc = &event->hw;
1979         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1980         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1981
1982         if (reg1->idx != EXTRA_REG_NONE) {
1983                 wrmsrl(reg1->reg, 0);
1984                 wrmsrl(reg1->reg + 1, reg1->config);
1985                 wrmsrl(reg1->reg + 2, reg2->config);
1986                 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1987         }
1988         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1989 }
1990
1991 static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1992         &format_attr_event.attr,
1993         &format_attr_umask.attr,
1994         &format_attr_edge.attr,
1995         &format_attr_inv.attr,
1996         &format_attr_thresh8.attr,
1997         &format_attr_match.attr,
1998         &format_attr_mask.attr,
1999         NULL,
2000 };
2001
2002 static struct attribute_group nhmex_uncore_sbox_format_group = {
2003         .name                   = "format",
2004         .attrs                  = nhmex_uncore_sbox_formats_attr,
2005 };
2006
2007 static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
2008         NHMEX_UNCORE_OPS_COMMON_INIT(),
2009         .enable_event           = nhmex_sbox_msr_enable_event,
2010         .hw_config              = nhmex_sbox_hw_config,
2011         .get_constraint         = uncore_get_constraint,
2012         .put_constraint         = uncore_put_constraint,
2013 };
2014
2015 static struct intel_uncore_type nhmex_uncore_sbox = {
2016         .name                   = "sbox",
2017         .num_counters           = 4,
2018         .num_boxes              = 2,
2019         .perf_ctr_bits          = 48,
2020         .event_ctl              = NHMEX_S0_MSR_PMON_CTL0,
2021         .perf_ctr               = NHMEX_S0_MSR_PMON_CTR0,
2022         .event_mask             = NHMEX_PMON_RAW_EVENT_MASK,
2023         .box_ctl                = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
2024         .msr_offset             = NHMEX_S_MSR_OFFSET,
2025         .pair_ctr_ctl           = 1,
2026         .num_shared_regs        = 1,
2027         .ops                    = &nhmex_uncore_sbox_ops,
2028         .format_group           = &nhmex_uncore_sbox_format_group
2029 };
2030
2031 enum {
2032         EXTRA_REG_NHMEX_M_FILTER,
2033         EXTRA_REG_NHMEX_M_DSP,
2034         EXTRA_REG_NHMEX_M_ISS,
2035         EXTRA_REG_NHMEX_M_MAP,
2036         EXTRA_REG_NHMEX_M_MSC_THR,
2037         EXTRA_REG_NHMEX_M_PGT,
2038         EXTRA_REG_NHMEX_M_PLD,
2039         EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
2040 };
2041
2042 static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
2043         MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
2044         MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
2045         MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
2046         MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
2047         /* event 0xa uses two extra registers */
2048         MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
2049         MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
2050         MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
2051         /* events 0xd ~ 0x10 use the same extra register */
2052         MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
2053         MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
2054         MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
2055         MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
2056         MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
2057         MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
2058         MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
2059         MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
2060         MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
2061         EVENT_EXTRA_END
2062 };
2063
2064 /* Nehalem-EX or Westmere-EX? */
2065 static bool uncore_nhmex;
2066
2067 static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
2068 {
2069         struct intel_uncore_extra_reg *er;
2070         unsigned long flags;
2071         bool ret = false;
2072         u64 mask;
2073
2074         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2075                 er = &box->shared_regs[idx];
2076                 raw_spin_lock_irqsave(&er->lock, flags);
2077                 if (!atomic_read(&er->ref) || er->config == config) {
2078                         atomic_inc(&er->ref);
2079                         er->config = config;
2080                         ret = true;
2081                 }
2082                 raw_spin_unlock_irqrestore(&er->lock, flags);
2083
2084                 return ret;
2085         }
2086         /*
2087          * The ZDP_CTL_FVC MSR has 4 fields which are used to control
2088          * events 0xd ~ 0x10. Besides these 4 fields, there are additional
2089          * fields which are shared.
2090          */
2091         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2092         if (WARN_ON_ONCE(idx >= 4))
2093                 return false;
2094
2095         /* mask of the shared fields */
2096         if (uncore_nhmex)
2097                 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
2098         else
2099                 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
2100         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2101
2102         raw_spin_lock_irqsave(&er->lock, flags);
2103         /* add mask of the non-shared field if it's in use */
2104         if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
2105                 if (uncore_nhmex)
2106                         mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2107                 else
2108                         mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2109         }
2110
2111         if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
2112                 atomic_add(1 << (idx * 8), &er->ref);
2113                 if (uncore_nhmex)
2114                         mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2115                                 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2116                 else
2117                         mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2118                                 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2119                 er->config &= ~mask;
2120                 er->config |= (config & mask);
2121                 ret = true;
2122         }
2123         raw_spin_unlock_irqrestore(&er->lock, flags);
2124
2125         return ret;
2126 }
2127
2128 static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2129 {
2130         struct intel_uncore_extra_reg *er;
2131
2132         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2133                 er = &box->shared_regs[idx];
2134                 atomic_dec(&er->ref);
2135                 return;
2136         }
2137
2138         idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2139         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2140         atomic_sub(1 << (idx * 8), &er->ref);
2141 }
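
/*
 * Illustrative note (editor's addition): the ZDP_CTL_FVC reference
 * count packs four 8-bit counters into one atomic_t, one per FVC event
 * field. Taking field 0 once and field 2 three times leaves
 * er->ref == 0x30001; __BITS_VALUE(ref, idx, 8) extracts one field's
 * count and atomic_add(1 << (idx * 8), ...) bumps it.
 */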
2142
2143 static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
2144 {
2145         struct hw_perf_event *hwc = &event->hw;
2146         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2147         u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
2148         u64 config = reg1->config;
2149
2150         /* get the non-shared control bits and shift them */
2151         idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2152         if (uncore_nhmex)
2153                 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2154         else
2155                 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2156         if (new_idx > orig_idx) {
2157                 idx = new_idx - orig_idx;
2158                 config <<= 3 * idx;
2159         } else {
2160                 idx = orig_idx - new_idx;
2161                 config >>= 3 * idx;
2162         }
2163
2164         /* add the shared control bits back */
2165         if (uncore_nhmex)
2166                 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2167         else
2168                 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2170         if (modify) {
2171                 /* adjust the main event selector */
2172                 if (new_idx > orig_idx)
2173                         hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2174                 else
2175                         hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2176                 reg1->config = config;
2177                 reg1->idx = ~0xff | new_idx;
2178         }
2179         return config;
2180 }
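
/*
 * Worked example (editor's addition, assuming the 3-bit spacing of the
 * per-event FVC fields implied by the shifts above): moving an event
 * from field 0 to field 2 shifts its non-shared FVC bits left by 6,
 * and the main event selector is bumped by
 * 2 << NHMEX_M_PMON_CTL_INC_SEL_SHIFT, turning inc_sel 0xd into 0xf.
 */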
2181
2182 static struct event_constraint *
2183 nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2184 {
2185         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2186         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2187         int i, idx[2], alloc = 0;
2188         u64 config1 = reg1->config;
2189
2190         idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2191         idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2192 again:
2193         for (i = 0; i < 2; i++) {
2194                 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2195                         idx[i] = 0xff;
2196
2197                 if (idx[i] == 0xff)
2198                         continue;
2199
2200                 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2201                                 __BITS_VALUE(config1, i, 32)))
2202                         goto fail;
2203                 alloc |= (0x1 << i);
2204         }
2205
2206         /* for the match/mask registers */
2207         if (reg2->idx != EXTRA_REG_NONE &&
2208             (uncore_box_is_fake(box) || !reg2->alloc) &&
2209             !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2210                 goto fail;
2211
2212         /*
2213          * If it's a fake box -- as used by validate_{group,event}() -- we
2214          * shouldn't touch the event state. We can avoid doing so
2215          * since both will call get_event_constraints() only once
2216          * per event; this avoids the need for reg->alloc.
2217          */
2218         if (!uncore_box_is_fake(box)) {
2219                 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2220                         nhmex_mbox_alter_er(event, idx[0], true);
2221                 reg1->alloc |= alloc;
2222                 if (reg2->idx != EXTRA_REG_NONE)
2223                         reg2->alloc = 1;
2224         }
2225         return NULL;
2226 fail:
2227         if (idx[0] != 0xff && !(alloc & 0x1) &&
2228             idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2229                 /*
2230                  * events 0xd ~ 0x10 are functionally identical, but are
2231                  * controlled by different fields in the ZDP_CTL_FVC
2232                  * register. If we fail to take one field, try the
2233                  * remaining 3 choices.
2234                  */
2235                 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2236                 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2237                 idx[0] = (idx[0] + 1) % 4;
2238                 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2239                 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2240                         config1 = nhmex_mbox_alter_er(event, idx[0], false);
2241                         goto again;
2242                 }
2243         }
2244
2245         if (alloc & 0x1)
2246                 nhmex_mbox_put_shared_reg(box, idx[0]);
2247         if (alloc & 0x2)
2248                 nhmex_mbox_put_shared_reg(box, idx[1]);
2249         return &constraint_empty;
2250 }
2251
2252 static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2253 {
2254         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2255         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2256
2257         if (uncore_box_is_fake(box))
2258                 return;
2259
2260         if (reg1->alloc & 0x1)
2261                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2262         if (reg1->alloc & 0x2)
2263                 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2264         reg1->alloc = 0;
2265
2266         if (reg2->alloc) {
2267                 nhmex_mbox_put_shared_reg(box, reg2->idx);
2268                 reg2->alloc = 0;
2269         }
2270 }
2271
2272 static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
2273 {
2274         if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2275                 return er->idx;
2276         return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
2277 }
2278
2279 static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2280 {
2281         struct intel_uncore_type *type = box->pmu->type;
2282         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2283         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2284         struct extra_reg *er;
2285         unsigned msr;
2286         int reg_idx = 0;
2287         /*
2288          * The mbox events may require at most 2 extra MSRs. But only
2289          * the lower 32 bits in these MSRs are significant, so we can use
2290          * config1 to pass the configs for both MSRs.
2291          */
2292         for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2293                 if (er->event != (event->hw.config & er->config_mask))
2294                         continue;
2295                 if (event->attr.config1 & ~er->valid_mask)
2296                         return -EINVAL;
2297
2298                 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2299                 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2300                         return -EINVAL;
2301
2302                 /* always use bits 32~63 to pass the PLD config */
2303                 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2304                         reg_idx = 1;
2305                 else if (WARN_ON_ONCE(reg_idx > 0))
2306                         return -EINVAL;
2307
2308                 reg1->idx &= ~(0xff << (reg_idx * 8));
2309                 reg1->reg &= ~(0xffff << (reg_idx * 16));
2310                 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2311                 reg1->reg |= msr << (reg_idx * 16);
2312                 reg1->config = event->attr.config1;
2313                 reg_idx++;
2314         }
2315         /*
2316          * The mbox only provides the ability to perform address matching
2317          * for the PLD events.
2318          */
2319         if (reg_idx == 2) {
2320                 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2321                 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2322                         reg2->config = event->attr.config2;
2323                 else
2324                         reg2->config = ~0ULL;
2325                 if (box->pmu->pmu_idx == 0)
2326                         reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2327                 else
2328                         reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2329         }
2330         return 0;
2331 }
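
/*
 * Illustrative note (editor's addition): reg1 does double duty as
 * storage for up to two extra MSRs -- two 8-bit indices packed into
 * reg1->idx and two 16-bit MSR addresses packed into reg1->reg, with
 * the PLD register always occupying the upper halves. An event 0xa,
 * which needs both ISS and PLD, therefore ends up with
 * reg1->idx == (pld_idx << 8) | iss_idx, where the idx values come
 * from nhmex_mbox_extra_reg_idx().
 */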
2332
2333 static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
2334 {
2335         struct intel_uncore_extra_reg *er;
2336         unsigned long flags;
2337         u64 config;
2338
2339         if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2340                 return box->shared_regs[idx].config;
2341
2342         er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2343         raw_spin_lock_irqsave(&er->lock, flags);
2344         config = er->config;
2345         raw_spin_unlock_irqrestore(&er->lock, flags);
2346         return config;
2347 }
2348
2349 static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2350 {
2351         struct hw_perf_event *hwc = &event->hw;
2352         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2353         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2354         int idx;
2355
2356         idx = __BITS_VALUE(reg1->idx, 0, 8);
2357         if (idx != 0xff)
2358                 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2359                         nhmex_mbox_shared_reg_config(box, idx));
2360         idx = __BITS_VALUE(reg1->idx, 1, 8);
2361         if (idx != 0xff)
2362                 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2363                         nhmex_mbox_shared_reg_config(box, idx));
2364
2365         if (reg2->idx != EXTRA_REG_NONE) {
2366                 wrmsrl(reg2->reg, 0);
2367                 if (reg2->config != ~0ULL) {
2368                         wrmsrl(reg2->reg + 1,
2369                                 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2370                         wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2371                                 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2372                         wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2373                 }
2374         }
2375
2376         wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
2377 }
2378
2379 DEFINE_UNCORE_FORMAT_ATTR(count_mode,           count_mode,     "config:2-3");
2380 DEFINE_UNCORE_FORMAT_ATTR(storage_mode,         storage_mode,   "config:4-5");
2381 DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,            wrap_mode,      "config:6");
2382 DEFINE_UNCORE_FORMAT_ATTR(flag_mode,            flag_mode,      "config:7");
2383 DEFINE_UNCORE_FORMAT_ATTR(inc_sel,              inc_sel,        "config:9-13");
2384 DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,         set_flag_sel,   "config:19-21");
2385 DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,        filter_cfg_en,  "config2:63");
2386 DEFINE_UNCORE_FORMAT_ATTR(filter_match,         filter_match,   "config2:0-33");
2387 DEFINE_UNCORE_FORMAT_ATTR(filter_mask,          filter_mask,    "config2:34-61");
2388 DEFINE_UNCORE_FORMAT_ATTR(dsp,                  dsp,            "config1:0-31");
2389 DEFINE_UNCORE_FORMAT_ATTR(thr,                  thr,            "config1:0-31");
2390 DEFINE_UNCORE_FORMAT_ATTR(fvc,                  fvc,            "config1:0-31");
2391 DEFINE_UNCORE_FORMAT_ATTR(pgt,                  pgt,            "config1:0-31");
2392 DEFINE_UNCORE_FORMAT_ATTR(map,                  map,            "config1:0-31");
2393 DEFINE_UNCORE_FORMAT_ATTR(iss,                  iss,            "config1:0-31");
2394 DEFINE_UNCORE_FORMAT_ATTR(pld,                  pld,            "config1:32-63");
2395
2396 static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2397         &format_attr_count_mode.attr,
2398         &format_attr_storage_mode.attr,
2399         &format_attr_wrap_mode.attr,
2400         &format_attr_flag_mode.attr,
2401         &format_attr_inc_sel.attr,
2402         &format_attr_set_flag_sel.attr,
2403         &format_attr_filter_cfg_en.attr,
2404         &format_attr_filter_match.attr,
2405         &format_attr_filter_mask.attr,
2406         &format_attr_dsp.attr,
2407         &format_attr_thr.attr,
2408         &format_attr_fvc.attr,
2409         &format_attr_pgt.attr,
2410         &format_attr_map.attr,
2411         &format_attr_iss.attr,
2412         &format_attr_pld.attr,
2413         NULL,
2414 };
2415
2416 static struct attribute_group nhmex_uncore_mbox_format_group = {
2417         .name           = "format",
2418         .attrs          = nhmex_uncore_mbox_formats_attr,
2419 };
2420
2421 static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2422         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2423         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2424         { /* end: all zeroes */ },
2425 };
2426
2427 static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2428         INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2429         INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2430         { /* end: all zeroes */ },
2431 };
2432
2433 static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2434         NHMEX_UNCORE_OPS_COMMON_INIT(),
2435         .enable_event   = nhmex_mbox_msr_enable_event,
2436         .hw_config      = nhmex_mbox_hw_config,
2437         .get_constraint = nhmex_mbox_get_constraint,
2438         .put_constraint = nhmex_mbox_put_constraint,
2439 };
2440
2441 static struct intel_uncore_type nhmex_uncore_mbox = {
2442         .name                   = "mbox",
2443         .num_counters           = 6,
2444         .num_boxes              = 2,
2445         .perf_ctr_bits          = 48,
2446         .event_ctl              = NHMEX_M0_MSR_PMU_CTL0,
2447         .perf_ctr               = NHMEX_M0_MSR_PMU_CNT0,
2448         .event_mask             = NHMEX_M_PMON_RAW_EVENT_MASK,
2449         .box_ctl                = NHMEX_M0_MSR_GLOBAL_CTL,
2450         .msr_offset             = NHMEX_M_MSR_OFFSET,
2451         .pair_ctr_ctl           = 1,
2452         .num_shared_regs        = 8,
2453         .event_descs            = nhmex_uncore_mbox_events,
2454         .ops                    = &nhmex_uncore_mbox_ops,
2455         .format_group           = &nhmex_uncore_mbox_format_group,
2456 };
2457
2458 static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
2459 {
2460         struct hw_perf_event *hwc = &event->hw;
2461         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2462
2463         /* adjust the main event selector and extra register index */
2464         if (reg1->idx % 2) {
2465                 reg1->idx--;
2466                 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2467         } else {
2468                 reg1->idx++;
2469                 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2470         }
2471
2472         /* adjust extra register config */
2473         switch (reg1->idx % 6) {
2474         case 2:
2475                 /* shift bits 8~15 down to bits 0~7 */
2476                 reg1->config >>= 8;
2477                 break;
2478         case 3:
2479                 /* shift bits 0~7 up to bits 8~15 */
2480                 reg1->config <<= 8;
2481                 break;
2482         }
2483 }
2484
2485 /*
2486  * Each rbox has 4 event sets which monitor QPI ports 0~3 or 4~7.
2487  * An event set consists of 6 events; the 3rd and 4th events in
2488  * an event set use the same extra register, so an event set uses
2489  * 5 extra registers.
2490  */
2491 static struct event_constraint *
2492 nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2493 {
2494         struct hw_perf_event *hwc = &event->hw;
2495         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2496         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2497         struct intel_uncore_extra_reg *er;
2498         unsigned long flags;
2499         int idx, er_idx;
2500         u64 config1;
2501         bool ok = false;
2502
2503         if (!uncore_box_is_fake(box) && reg1->alloc)
2504                 return NULL;
2505
2506         idx = reg1->idx % 6;
2507         config1 = reg1->config;
2508 again:
2509         er_idx = idx;
2510         /* the 3rd and 4th events use the same extra register */
2511         if (er_idx > 2)
2512                 er_idx--;
2513         er_idx += (reg1->idx / 6) * 5;
2514
2515         er = &box->shared_regs[er_idx];
2516         raw_spin_lock_irqsave(&er->lock, flags);
2517         if (idx < 2) {
2518                 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2519                         atomic_inc(&er->ref);
2520                         er->config = reg1->config;
2521                         ok = true;
2522                 }
2523         } else if (idx == 2 || idx == 3) {
2524                 /*
2525                  * these two events use different fields in an extra register,
2526                  * bits 0~7 and bits 8~15 respectively.
2527                  */
2528                 u64 mask = 0xff << ((idx - 2) * 8);
2529                 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2530                                 !((er->config ^ config1) & mask)) {
2531                         atomic_add(1 << ((idx - 2) * 8), &er->ref);
2532                         er->config &= ~mask;
2533                         er->config |= config1 & mask;
2534                         ok = true;
2535                 }
2536         } else {
2537                 if (!atomic_read(&er->ref) ||
2538                                 (er->config == (hwc->config >> 32) &&
2539                                  er->config1 == reg1->config &&
2540                                  er->config2 == reg2->config)) {
2541                         atomic_inc(&er->ref);
2542                         er->config = (hwc->config >> 32);
2543                         er->config1 = reg1->config;
2544                         er->config2 = reg2->config;
2545                         ok = true;
2546                 }
2547         }
2548         raw_spin_unlock_irqrestore(&er->lock, flags);
2549
2550         if (!ok) {
2551                 /*
2552                  * The Rbox events always come in pairs. The paired
2553                  * events are functionally identical, but use different
2554                  * extra registers. If we fail to take an extra
2555                  * register, try the alternative.
2556                  */
2557                 if (idx % 2)
2558                         idx--;
2559                 else
2560                         idx++;
2561                 if (idx != reg1->idx % 6) {
2562                         if (idx == 2)
2563                                 config1 >>= 8;
2564                         else if (idx == 3)
2565                                 config1 <<= 8;
2566                         goto again;
2567                 }
2568         } else {
2569                 if (!uncore_box_is_fake(box)) {
2570                         if (idx != reg1->idx % 6)
2571                                 nhmex_rbox_alter_er(box, event);
2572                         reg1->alloc = 1;
2573                 }
2574                 return NULL;
2575         }
2576         return &constraint_empty;
2577 }
2578
2579 static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
2580 {
2581         struct intel_uncore_extra_reg *er;
2582         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2583         int idx, er_idx;
2584
2585         if (uncore_box_is_fake(box) || !reg1->alloc)
2586                 return;
2587
2588         idx = reg1->idx % 6;
2589         er_idx = idx;
2590         if (er_idx > 2)
2591                 er_idx--;
2592         er_idx += (reg1->idx / 6) * 5;
2593
2594         er = &box->shared_regs[er_idx];
2595         if (idx == 2 || idx == 3)
2596                 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2597         else
2598                 atomic_dec(&er->ref);
2599
2600         reg1->alloc = 0;
2601 }
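
/*
 * Worked example of the er_idx arithmetic above (editor's addition):
 * reg1->idx == 9 means event 3 of the second event set; idx = 9 % 6 = 3
 * shares a register with event 2, so er_idx becomes 2, and adding
 * (9 / 6) * 5 = 5 selects shared register 7.
 */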
2602
2603 static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2604 {
2605         struct hw_perf_event *hwc = &event->hw;
2606         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2607         struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2608         int idx;
2609
2610         idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
2611                 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2612         if (idx >= 0x18)
2613                 return -EINVAL;
2614
2615         reg1->idx = idx;
2616         reg1->config = event->attr.config1;
2617
2618         switch (idx % 6) {
2619         case 4:
2620         case 5:
2621                 hwc->config |= event->attr.config & (~0ULL << 32);
2622                 reg2->config = event->attr.config2;
2623                 break;
2624         }
2625         return 0;
2626 }
2627
2628 static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2629 {
2630         struct hw_perf_event *hwc = &event->hw;
2631         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2632         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2633         int idx, port;
2634
2635         idx = reg1->idx;
2636         port = idx / 6 + box->pmu->pmu_idx * 4;
2637
2638         switch (idx % 6) {
2639         case 0:
2640                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
2641                 break;
2642         case 1:
2643                 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
2644                 break;
2645         case 2:
2646         case 3:
2647                 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
2648                         uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
2649                 break;
2650         case 4:
2651                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
2652                         hwc->config >> 32);
2653                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
2654                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
2655                 break;
2656         case 5:
2657                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
2658                         hwc->config >> 32);
2659                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
2660                 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
2661                 break;
2662         }
2663
2664         wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2665                 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
2666 }
2667
2668 DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
2669 DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
2670 DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
2671 DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
2672 DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
2673
2674 static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
2675         &format_attr_event5.attr,
2676         &format_attr_xbr_mm_cfg.attr,
2677         &format_attr_xbr_match.attr,
2678         &format_attr_xbr_mask.attr,
2679         &format_attr_qlx_cfg.attr,
2680         &format_attr_iperf_cfg.attr,
2681         NULL,
2682 };
2683
2684 static struct attribute_group nhmex_uncore_rbox_format_group = {
2685         .name = "format",
2686         .attrs = nhmex_uncore_rbox_formats_attr,
2687 };
2688
2689 static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
2690         INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,         "event=0x0,iperf_cfg=0x80000000"),
2691         INTEL_UNCORE_EVENT_DESC(qpi1_flit_send,         "event=0x6,iperf_cfg=0x80000000"),
2692         INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,         "event=0x0,iperf_cfg=0x40000000"),
2693         INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,         "event=0x6,iperf_cfg=0x40000000"),
2694         INTEL_UNCORE_EVENT_DESC(qpi0_data_response,     "event=0x0,iperf_cfg=0xc4"),
2695         INTEL_UNCORE_EVENT_DESC(qpi1_data_response,     "event=0x6,iperf_cfg=0xc4"),
2696         { /* end: all zeroes */ },
2697 };
2698
2699 static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
2700         NHMEX_UNCORE_OPS_COMMON_INIT(),
2701         .enable_event           = nhmex_rbox_msr_enable_event,
2702         .hw_config              = nhmex_rbox_hw_config,
2703         .get_constraint         = nhmex_rbox_get_constraint,
2704         .put_constraint         = nhmex_rbox_put_constraint,
2705 };
2706
2707 static struct intel_uncore_type nhmex_uncore_rbox = {
2708         .name                   = "rbox",
2709         .num_counters           = 8,
2710         .num_boxes              = 2,
2711         .perf_ctr_bits          = 48,
2712         .event_ctl              = NHMEX_R_MSR_PMON_CTL0,
2713         .perf_ctr               = NHMEX_R_MSR_PMON_CNT0,
2714         .event_mask             = NHMEX_R_PMON_RAW_EVENT_MASK,
2715         .box_ctl                = NHMEX_R_MSR_GLOBAL_CTL,
2716         .msr_offset             = NHMEX_R_MSR_OFFSET,
2717         .pair_ctr_ctl           = 1,
2718         .num_shared_regs        = 20,
2719         .event_descs            = nhmex_uncore_rbox_events,
2720         .ops                    = &nhmex_uncore_rbox_ops,
2721         .format_group           = &nhmex_uncore_rbox_format_group
2722 };
2723
2724 static struct intel_uncore_type *nhmex_msr_uncores[] = {
2725         &nhmex_uncore_ubox,
2726         &nhmex_uncore_cbox,
2727         &nhmex_uncore_bbox,
2728         &nhmex_uncore_sbox,
2729         &nhmex_uncore_mbox,
2730         &nhmex_uncore_rbox,
2731         &nhmex_uncore_wbox,
2732         NULL,
2733 };
2734 /* end of Nehalem-EX uncore support */
2735
2736 static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
2737 {
2738         struct hw_perf_event *hwc = &event->hw;
2739
2740         hwc->idx = idx;
2741         hwc->last_tag = ++box->tags[idx];
2742
2743         if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
2744                 hwc->event_base = uncore_fixed_ctr(box);
2745                 hwc->config_base = uncore_fixed_ctl(box);
2746                 return;
2747         }
2748
2749         hwc->config_base = uncore_event_ctl(box, hwc->idx);
2750         hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
2751 }
2752
2753 static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
2754 {
2755         u64 prev_count, new_count, delta;
2756         int shift;
2757
2758         if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
2759                 shift = 64 - uncore_fixed_ctr_bits(box);
2760         else
2761                 shift = 64 - uncore_perf_ctr_bits(box);
2762
2763         /* the hrtimer might modify the previous event value */
2764 again:
2765         prev_count = local64_read(&event->hw.prev_count);
2766         new_count = uncore_read_counter(box, event);
2767         if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
2768                 goto again;
2769
2770         delta = (new_count << shift) - (prev_count << shift);
2771         delta >>= shift;
2772
2773         local64_add(delta, &event->count);
2774 }
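/*
 * A worked example of the shift trick above, assuming 48-bit counters
 * (shift = 16): if the counter wrapped from 0xffffffffffff to 0x5, then
 * (0x5 << 16) - (0xffffffffffff << 16) truncates to 0x60000 in 64 bits,
 * and shifting back down by 16 yields the true delta of 6.
 */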
2775
2776 /*
2777  * The overflow interrupt is unavailable for SandyBridge-EP and broken
2778  * on SandyBridge, so we use a hrtimer to poll the counters periodically
2779  * and avoid overflow.
2780  */
2781 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
2782 {
2783         struct intel_uncore_box *box;
2784         unsigned long flags;
2785         int bit;
2786
2787         box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
2788         if (!box->n_active || box->cpu != smp_processor_id())
2789                 return HRTIMER_NORESTART;
2790         /*
2791          * disable local interrupts to prevent uncore_pmu_event_start/stop
2792          * from interrupting the update process
2793          */
2794         local_irq_save(flags);
2795
2796         for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
2797                 uncore_perf_event_update(box, box->events[bit]);
2798
2799         local_irq_restore(flags);
2800
2801         hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
2802         return HRTIMER_RESTART;
2803 }
2804
2805 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
2806 {
2807         __hrtimer_start_range_ns(&box->hrtimer,
2808                         ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
2809                         HRTIMER_MODE_REL_PINNED, 0);
2810 }
2811
2812 static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
2813 {
2814         hrtimer_cancel(&box->hrtimer);
2815 }
2816
2817 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2818 {
2819         hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2820         box->hrtimer.function = uncore_pmu_hrtimer;
2821 }
2822
2823 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
2824 {
2825         struct intel_uncore_box *box;
2826         int i, size;
2827
2828         size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
2829
2830         box = kzalloc_node(size, GFP_KERNEL, node);
2831         if (!box)
2832                 return NULL;
2833
2834         for (i = 0; i < type->num_shared_regs; i++)
2835                 raw_spin_lock_init(&box->shared_regs[i].lock);
2836
2837         uncore_pmu_init_hrtimer(box);
2838         atomic_set(&box->refcnt, 1);
2839         box->cpu = -1;
2840         box->phys_id = -1;
2841
2842         return box;
2843 }
2844
2845 static struct intel_uncore_box *
2846 uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
2847 {
2848         struct intel_uncore_box *box;
2849
2850         box = *per_cpu_ptr(pmu->box, cpu);
2851         if (box)
2852                 return box;
2853
2854         raw_spin_lock(&uncore_box_lock);
2855         list_for_each_entry(box, &pmu->box_list, list) {
2856                 if (box->phys_id == topology_physical_package_id(cpu)) {
2857                         atomic_inc(&box->refcnt);
2858                         *per_cpu_ptr(pmu->box, cpu) = box;
2859                         break;
2860                 }
2861         }
2862         raw_spin_unlock(&uncore_box_lock);
2863
2864         return *per_cpu_ptr(pmu->box, cpu);
2865 }
2866
2867 static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
2868 {
2869         return container_of(event->pmu, struct intel_uncore_pmu, pmu);
2870 }
2871
2872 static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
2873 {
2874         /*
2875          * perf core schedules events on a per-cpu basis; uncore events are
2876          * collected by one of the cpus inside a physical package.
2877          */
2878         return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
2879 }
2880
2881 static int
2882 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
2883 {
2884         struct perf_event *event;
2885         int n, max_count;
2886
2887         max_count = box->pmu->type->num_counters;
2888         if (box->pmu->type->fixed_ctl)
2889                 max_count++;
2890
2891         if (box->n_events >= max_count)
2892                 return -EINVAL;
2893
2894         n = box->n_events;
2895         box->event_list[n] = leader;
2896         n++;
2897         if (!dogrp)
2898                 return n;
2899
2900         list_for_each_entry(event, &leader->sibling_list, group_entry) {
2901                 if (event->state <= PERF_EVENT_STATE_OFF)
2902                         continue;
2903
2904                 if (n >= max_count)
2905                         return -EINVAL;
2906
2907                 box->event_list[n] = event;
2908                 n++;
2909         }
2910         return n;
2911 }
2912
2913 static struct event_constraint *
2914 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2915 {
2916         struct intel_uncore_type *type = box->pmu->type;
2917         struct event_constraint *c;
2918
2919         if (type->ops->get_constraint) {
2920                 c = type->ops->get_constraint(box, event);
2921                 if (c)
2922                         return c;
2923         }
2924
2925         if (event->attr.config == UNCORE_FIXED_EVENT)
2926                 return &constraint_fixed;
2927
2928         if (type->constraints) {
2929                 for_each_event_constraint(c, type->constraints) {
2930                         if ((event->hw.config & c->cmask) == c->code)
2931                                 return c;
2932                 }
2933         }
2934
2935         return &type->unconstrainted;
2936 }
2937
2938 static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2939 {
2940         if (box->pmu->type->ops->put_constraint)
2941                 box->pmu->type->ops->put_constraint(box, event);
2942 }
2943
2944 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
2945 {
2946         unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2947         struct event_constraint *c;
2948         int i, wmin, wmax, ret = 0;
2949         struct hw_perf_event *hwc;
2950
2951         bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2952
2953         for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
2954                 hwc = &box->event_list[i]->hw;
2955                 c = uncore_get_event_constraint(box, box->event_list[i]);
2956                 hwc->constraint = c;
2957                 wmin = min(wmin, c->weight);
2958                 wmax = max(wmax, c->weight);
2959         }
2960
2961         /* fastpath, try to reuse previous register */
2962         for (i = 0; i < n; i++) {
2963                 hwc = &box->event_list[i]->hw;
2964                 c = hwc->constraint;
2965
2966                 /* never assigned */
2967                 if (hwc->idx == -1)
2968                         break;
2969
2970                 /* constraint still honored */
2971                 if (!test_bit(hwc->idx, c->idxmsk))
2972                         break;
2973
2974                 /* not already used */
2975                 if (test_bit(hwc->idx, used_mask))
2976                         break;
2977
2978                 __set_bit(hwc->idx, used_mask);
2979                 if (assign)
2980                         assign[i] = hwc->idx;
2981         }
2982         /* slow path */
2983         if (i != n)
2984                 ret = perf_assign_events(box->event_list, n,
2985                                          wmin, wmax, assign);
2986
2987         if (!assign || ret) {
2988                 for (i = 0; i < n; i++)
2989                         uncore_put_event_constraint(box, box->event_list[i]);
2990         }
2991         return ret ? -EINVAL : 0;
2992 }
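/*
 * A sketch of the slow path, relying on generic perf behaviour rather
 * than anything specific to this file: perf_assign_events() walks the
 * events from the lowest constraint weight (wmin) to the highest (wmax),
 * so an event restricted to a single counter is placed before an event
 * that may use any of them, avoiding a spurious -EINVAL when both fit.
 */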
2993
2994 static void uncore_pmu_event_start(struct perf_event *event, int flags)
2995 {
2996         struct intel_uncore_box *box = uncore_event_to_box(event);
2997         int idx = event->hw.idx;
2998
2999         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
3000                 return;
3001
3002         if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
3003                 return;
3004
3005         event->hw.state = 0;
3006         box->events[idx] = event;
3007         box->n_active++;
3008         __set_bit(idx, box->active_mask);
3009
3010         local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
3011         uncore_enable_event(box, event);
3012
3013         if (box->n_active == 1) {
3014                 uncore_enable_box(box);
3015                 uncore_pmu_start_hrtimer(box);
3016         }
3017 }
3018
3019 static void uncore_pmu_event_stop(struct perf_event *event, int flags)
3020 {
3021         struct intel_uncore_box *box = uncore_event_to_box(event);
3022         struct hw_perf_event *hwc = &event->hw;
3023
3024         if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
3025                 uncore_disable_event(box, event);
3026                 box->n_active--;
3027                 box->events[hwc->idx] = NULL;
3028                 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3029                 hwc->state |= PERF_HES_STOPPED;
3030
3031                 if (box->n_active == 0) {
3032                         uncore_disable_box(box);
3033                         uncore_pmu_cancel_hrtimer(box);
3034                 }
3035         }
3036
3037         if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
3038                 /*
3039                  * Drain the remaining delta count out of an event
3040                  * that we are disabling:
3041                  */
3042                 uncore_perf_event_update(box, event);
3043                 hwc->state |= PERF_HES_UPTODATE;
3044         }
3045 }
3046
3047 static int uncore_pmu_event_add(struct perf_event *event, int flags)
3048 {
3049         struct intel_uncore_box *box = uncore_event_to_box(event);
3050         struct hw_perf_event *hwc = &event->hw;
3051         int assign[UNCORE_PMC_IDX_MAX];
3052         int i, n, ret;
3053
3054         if (!box)
3055                 return -ENODEV;
3056
3057         ret = n = uncore_collect_events(box, event, false);
3058         if (ret < 0)
3059                 return ret;
3060
3061         hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3062         if (!(flags & PERF_EF_START))
3063                 hwc->state |= PERF_HES_ARCH;
3064
3065         ret = uncore_assign_events(box, assign, n);
3066         if (ret)
3067                 return ret;
3068
3069         /* save events moving to new counters */
3070         for (i = 0; i < box->n_events; i++) {
3071                 event = box->event_list[i];
3072                 hwc = &event->hw;
3073
3074                 if (hwc->idx == assign[i] &&
3075                         hwc->last_tag == box->tags[assign[i]])
3076                         continue;
3077                 /*
3078                  * Ensure we don't accidentally enable a stopped
3079                  * counter simply because we rescheduled.
3080                  */
3081                 if (hwc->state & PERF_HES_STOPPED)
3082                         hwc->state |= PERF_HES_ARCH;
3083
3084                 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3085         }
3086
3087         /* reprogram moved events into new counters */
3088         for (i = 0; i < n; i++) {
3089                 event = box->event_list[i];
3090                 hwc = &event->hw;
3091
3092                 if (hwc->idx != assign[i] ||
3093                         hwc->last_tag != box->tags[assign[i]])
3094                         uncore_assign_hw_event(box, event, assign[i]);
3095                 else if (i < box->n_events)
3096                         continue;
3097
3098                 if (hwc->state & PERF_HES_ARCH)
3099                         continue;
3100
3101                 uncore_pmu_event_start(event, 0);
3102         }
3103         box->n_events = n;
3104
3105         return 0;
3106 }
3107
3108 static void uncore_pmu_event_del(struct perf_event *event, int flags)
3109 {
3110         struct intel_uncore_box *box = uncore_event_to_box(event);
3111         int i;
3112
3113         uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3114
3115         for (i = 0; i < box->n_events; i++) {
3116                 if (event == box->event_list[i]) {
3117                         uncore_put_event_constraint(box, event);
3118
3119                         while (++i < box->n_events)
3120                                 box->event_list[i - 1] = box->event_list[i];
3121
3122                         --box->n_events;
3123                         break;
3124                 }
3125         }
3126
3127         event->hw.idx = -1;
3128         event->hw.last_tag = ~0ULL;
3129 }
3130
3131 static void uncore_pmu_event_read(struct perf_event *event)
3132 {
3133         struct intel_uncore_box *box = uncore_event_to_box(event);
3134         uncore_perf_event_update(box, event);
3135 }
3136
3137 /*
3138  * Validation ensures that the group can be loaded onto the
3139  * PMU as if it were the only group available.
3140  */
3141 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3142                                 struct perf_event *event)
3143 {
3144         struct perf_event *leader = event->group_leader;
3145         struct intel_uncore_box *fake_box;
3146         int ret = -EINVAL, n;
3147
3148         fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3149         if (!fake_box)
3150                 return -ENOMEM;
3151
3152         fake_box->pmu = pmu;
3153         /*
3154          * the event is not yet connected with its
3155          * siblings, therefore we must first collect
3156          * the existing siblings and then add the new
3157          * event before we can simulate the scheduling
3158          */
3159         n = uncore_collect_events(fake_box, leader, true);
3160         if (n < 0)
3161                 goto out;
3162
3163         fake_box->n_events = n;
3164         n = uncore_collect_events(fake_box, event, false);
3165         if (n < 0)
3166                 goto out;
3167
3168         fake_box->n_events = n;
3169
3170         ret = uncore_assign_events(fake_box, NULL, n);
3171 out:
3172         kfree(fake_box);
3173         return ret;
3174 }
3175
3176 static int uncore_pmu_event_init(struct perf_event *event)
3177 {
3178         struct intel_uncore_pmu *pmu;
3179         struct intel_uncore_box *box;
3180         struct hw_perf_event *hwc = &event->hw;
3181         int ret;
3182
3183         if (event->attr.type != event->pmu->type)
3184                 return -ENOENT;
3185
3186         pmu = uncore_event_to_pmu(event);
3187         /* no device found for this pmu */
3188         if (pmu->func_id < 0)
3189                 return -ENOENT;
3190
3191         /*
3192          * The uncore PMU always measures at all privilege levels,
3193          * so it doesn't make sense to specify any exclude bits.
3194          */
3195         if (event->attr.exclude_user || event->attr.exclude_kernel ||
3196                         event->attr.exclude_hv || event->attr.exclude_idle)
3197                 return -EINVAL;
3198
3199         /* Sampling not supported yet */
3200         if (hwc->sample_period)
3201                 return -EINVAL;
3202
3203         /*
3204          * Place all uncore events for a particular physical package
3205          * onto a single cpu
3206          */
3207         if (event->cpu < 0)
3208                 return -EINVAL;
3209         box = uncore_pmu_to_box(pmu, event->cpu);
3210         if (!box || box->cpu < 0)
3211                 return -EINVAL;
3212         event->cpu = box->cpu;
3213
3214         event->hw.idx = -1;
3215         event->hw.last_tag = ~0ULL;
3216         event->hw.extra_reg.idx = EXTRA_REG_NONE;
3217         event->hw.branch_reg.idx = EXTRA_REG_NONE;
3218
3219         if (event->attr.config == UNCORE_FIXED_EVENT) {
3220                 /* no fixed counter */
3221                 if (!pmu->type->fixed_ctl)
3222                         return -EINVAL;
3223                 /*
3224                  * if there is only one fixed counter, only the first pmu
3225                  * can access the fixed counter
3226                  */
3227                 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3228                         return -EINVAL;
3229
3230                 /* fixed counters have event field hardcoded to zero */
3231                 hwc->config = 0ULL;
3232         } else {
3233                 hwc->config = event->attr.config & pmu->type->event_mask;
3234                 if (pmu->type->ops->hw_config) {
3235                         ret = pmu->type->ops->hw_config(box, event);
3236                         if (ret)
3237                                 return ret;
3238                 }
3239         }
3240
3241         if (event->group_leader != event)
3242                 ret = uncore_validate_group(pmu, event);
3243         else
3244                 ret = 0;
3245
3246         return ret;
3247 }
3248
3249 static ssize_t uncore_get_attr_cpumask(struct device *dev,
3250                                 struct device_attribute *attr, char *buf)
3251 {
3252         int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3253
3254         buf[n++] = '\n';
3255         buf[n] = '\0';
3256         return n;
3257 }
3258
3259 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3260
3261 static struct attribute *uncore_pmu_attrs[] = {
3262         &dev_attr_cpumask.attr,
3263         NULL,
3264 };
3265
3266 static struct attribute_group uncore_pmu_attr_group = {
3267         .attrs = uncore_pmu_attrs,
3268 };
3269
3270 static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3271 {
3272         int ret;
3273
3274         pmu->pmu = (struct pmu) {
3275                 .attr_groups    = pmu->type->attr_groups,
3276                 .task_ctx_nr    = perf_invalid_context,
3277                 .event_init     = uncore_pmu_event_init,
3278                 .add            = uncore_pmu_event_add,
3279                 .del            = uncore_pmu_event_del,
3280                 .start          = uncore_pmu_event_start,
3281                 .stop           = uncore_pmu_event_stop,
3282                 .read           = uncore_pmu_event_read,
3283         };
3284
3285         if (pmu->type->num_boxes == 1) {
3286                 if (strlen(pmu->type->name) > 0)
3287                         sprintf(pmu->name, "uncore_%s", pmu->type->name);
3288                 else
3289                         sprintf(pmu->name, "uncore");
3290         } else {
3291                 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3292                         pmu->pmu_idx);
3293         }
3294
3295         ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3296         return ret;
3297 }
3298
3299 static void __init uncore_type_exit(struct intel_uncore_type *type)
3300 {
3301         int i;
3302
3303         for (i = 0; i < type->num_boxes; i++)
3304                 free_percpu(type->pmus[i].box);
3305         kfree(type->pmus);
3306         type->pmus = NULL;
3307         kfree(type->events_group);
3308         type->events_group = NULL;
3309 }
3310
3311 static void __init uncore_types_exit(struct intel_uncore_type **types)
3312 {
3313         int i;
3314         for (i = 0; types[i]; i++)
3315                 uncore_type_exit(types[i]);
3316 }
3317
3318 static int __init uncore_type_init(struct intel_uncore_type *type)
3319 {
3320         struct intel_uncore_pmu *pmus;
3321         struct attribute_group *attr_group;
3322         struct attribute **attrs;
3323         int i, j;
3324
3325         pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3326         if (!pmus)
3327                 return -ENOMEM;
3328
3329         type->unconstrainted = (struct event_constraint)
3330                 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
3331                                 0, type->num_counters, 0, 0);
3332
3333         for (i = 0; i < type->num_boxes; i++) {
3334                 pmus[i].func_id = -1;
3335                 pmus[i].pmu_idx = i;
3336                 pmus[i].type = type;
3337                 INIT_LIST_HEAD(&pmus[i].box_list);
3338                 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3339                 if (!pmus[i].box)
3340                         goto fail;
3341         }
3342
3343         if (type->event_descs) {
3344                 i = 0;
3345                 while (type->event_descs[i].attr.attr.name)
3346                         i++;
3347
3348                 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3349                                         sizeof(*attr_group), GFP_KERNEL);
3350                 if (!attr_group)
3351                         goto fail;
3352
3353                 attrs = (struct attribute **)(attr_group + 1);
3354                 attr_group->name = "events";
3355                 attr_group->attrs = attrs;
3356
3357                 for (j = 0; j < i; j++)
3358                         attrs[j] = &type->event_descs[j].attr.attr;
3359
3360                 type->events_group = attr_group;
3361         }
3362
3363         type->pmu_group = &uncore_pmu_attr_group;
3364         type->pmus = pmus;
3365         return 0;
3366 fail:
3367         uncore_type_exit(type);
3368         return -ENOMEM;
3369 }
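/*
 * Layout note, as a reading aid: the events attribute group above is
 * carved out of one allocation,
 *
 *   [struct attribute_group][attr 0]...[attr i-1][NULL]
 *
 * with attr_group + 1 pointing at the attribute array, which is why
 * uncore_type_exit() can release everything with a single
 * kfree(type->events_group).
 */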
3370
3371 static int __init uncore_types_init(struct intel_uncore_type **types)
3372 {
3373         int i, ret;
3374
3375         for (i = 0; types[i]; i++) {
3376                 ret = uncore_type_init(types[i]);
3377                 if (ret)
3378                         goto fail;
3379         }
3380         return 0;
3381 fail:
3382         while (--i >= 0)
3383                 uncore_type_exit(types[i]);
3384         return ret;
3385 }
3386
3387 static struct pci_driver *uncore_pci_driver;
3388 static bool pcidrv_registered;
3389
3390 /*
3391  * add a pci uncore device
3392  */
3393 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3394 {
3395         struct intel_uncore_pmu *pmu;
3396         struct intel_uncore_box *box;
3397         struct intel_uncore_type *type;
3398         int phys_id;
3399
3400         phys_id = pcibus_to_physid[pdev->bus->number];
3401         if (phys_id < 0)
3402                 return -ENODEV;
3403
3404         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3405                 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3406                 pci_set_drvdata(pdev, NULL);
3407                 return 0;
3408         }
3409
3410         type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3411         box = uncore_alloc_box(type, NUMA_NO_NODE);
3412         if (!box)
3413                 return -ENOMEM;
3414
3415         /*
3416          * for a performance monitoring unit with multiple boxes,
3417          * each box has a different function id.
3418          */
3419         pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3420         if (pmu->func_id < 0)
3421                 pmu->func_id = pdev->devfn;
3422         else
3423                 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
3424
3425         box->phys_id = phys_id;
3426         box->pci_dev = pdev;
3427         box->pmu = pmu;
3428         uncore_box_init(box);
3429         pci_set_drvdata(pdev, box);
3430
3431         raw_spin_lock(&uncore_box_lock);
3432         list_add_tail(&box->list, &pmu->box_list);
3433         raw_spin_unlock(&uncore_box_lock);
3434
3435         return 0;
3436 }
3437
3438 static void uncore_pci_remove(struct pci_dev *pdev)
3439 {
3440         struct intel_uncore_box *box = pci_get_drvdata(pdev);
3441         struct intel_uncore_pmu *pmu;
3442         int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3443
3445         if (!box) {
3446                 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3447                         if (extra_pci_dev[phys_id][i] == pdev) {
3448                                 extra_pci_dev[phys_id][i] = NULL;
3449                                 break;
3450                         }
3451                 }
3452                 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3453                 return;
3454         }
3455
3456         pmu = box->pmu;
3457         if (WARN_ON_ONCE(phys_id != box->phys_id))
3458                 return;
3459
3460         pci_set_drvdata(pdev, NULL);
3461
3462         raw_spin_lock(&uncore_box_lock);
3463         list_del(&box->list);
3464         raw_spin_unlock(&uncore_box_lock);
3465
3466         for_each_possible_cpu(cpu) {
3467                 if (*per_cpu_ptr(pmu->box, cpu) == box) {
3468                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3469                         atomic_dec(&box->refcnt);
3470                 }
3471         }
3472
3473         WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
3474         kfree(box);
3475 }
3476
3477 static int __init uncore_pci_init(void)
3478 {
3479         int ret;
3480
3481         switch (boot_cpu_data.x86_model) {
3482         case 45: /* Sandy Bridge-EP */
3483                 ret = snbep_pci2phy_map_init(0x3ce0);
3484                 if (ret)
3485                         return ret;
3486                 pci_uncores = snbep_pci_uncores;
3487                 uncore_pci_driver = &snbep_uncore_pci_driver;
3488                 break;
3489         case 62: /* IvyTown */
3490                 ret = snbep_pci2phy_map_init(0x0e1e);
3491                 if (ret)
3492                         return ret;
3493                 pci_uncores = ivt_pci_uncores;
3494                 uncore_pci_driver = &ivt_uncore_pci_driver;
3495                 break;
3496         default:
3497                 return 0;
3498         }
3499
3500         ret = uncore_types_init(pci_uncores);
3501         if (ret)
3502                 return ret;
3503
3504         uncore_pci_driver->probe = uncore_pci_probe;
3505         uncore_pci_driver->remove = uncore_pci_remove;
3506
3507         ret = pci_register_driver(uncore_pci_driver);
3508         if (ret == 0)
3509                 pcidrv_registered = true;
3510         else
3511                 uncore_types_exit(pci_uncores);
3512
3513         return ret;
3514 }
3515
3516 static void __init uncore_pci_exit(void)
3517 {
3518         if (pcidrv_registered) {
3519                 pcidrv_registered = false;
3520                 pci_unregister_driver(uncore_pci_driver);
3521                 uncore_types_exit(pci_uncores);
3522         }
3523 }
3524
3525 /* CPU hotplug and unplug are serialized by the cpu_add_remove_lock mutex */
3526 static LIST_HEAD(boxes_to_free);
3527
3528 static void uncore_kfree_boxes(void)
3529 {
3530         struct intel_uncore_box *box;
3531
3532         while (!list_empty(&boxes_to_free)) {
3533                 box = list_entry(boxes_to_free.next,
3534                                  struct intel_uncore_box, list);
3535                 list_del(&box->list);
3536                 kfree(box);
3537         }
3538 }
3539
3540 static void uncore_cpu_dying(int cpu)
3541 {
3542         struct intel_uncore_type *type;
3543         struct intel_uncore_pmu *pmu;
3544         struct intel_uncore_box *box;
3545         int i, j;
3546
3547         for (i = 0; msr_uncores[i]; i++) {
3548                 type = msr_uncores[i];
3549                 for (j = 0; j < type->num_boxes; j++) {
3550                         pmu = &type->pmus[j];
3551                         box = *per_cpu_ptr(pmu->box, cpu);
3552                         *per_cpu_ptr(pmu->box, cpu) = NULL;
3553                         if (box && atomic_dec_and_test(&box->refcnt))
3554                                 list_add(&box->list, &boxes_to_free);
3555                 }
3556         }
3557 }
3558
3559 static int uncore_cpu_starting(int cpu)
3560 {
3561         struct intel_uncore_type *type;
3562         struct intel_uncore_pmu *pmu;
3563         struct intel_uncore_box *box, *exist;
3564         int i, j, k, phys_id;
3565
3566         phys_id = topology_physical_package_id(cpu);
3567
3568         for (i = 0; msr_uncores[i]; i++) {
3569                 type = msr_uncores[i];
3570                 for (j = 0; j < type->num_boxes; j++) {
3571                         pmu = &type->pmus[j];
3572                         box = *per_cpu_ptr(pmu->box, cpu);
3573                         /* called by uncore_cpu_init? */
3574                         if (box && box->phys_id >= 0) {
3575                                 uncore_box_init(box);
3576                                 continue;
3577                         }
3578
3579                         for_each_online_cpu(k) {
3580                                 exist = *per_cpu_ptr(pmu->box, k);
3581                                 if (exist && exist->phys_id == phys_id) {
3582                                         atomic_inc(&exist->refcnt);
3583                                         *per_cpu_ptr(pmu->box, cpu) = exist;
3584                                         if (box) {
3585                                                 list_add(&box->list,
3586                                                          &boxes_to_free);
3587                                                 box = NULL;
3588                                         }
3589                                         break;
3590                                 }
3591                         }
3592
3593                         if (box) {
3594                                 box->phys_id = phys_id;
3595                                 uncore_box_init(box);
3596                         }
3597                 }
3598         }
3599         return 0;
3600 }
3601
3602 static int uncore_cpu_prepare(int cpu, int phys_id)
3603 {
3604         struct intel_uncore_type *type;
3605         struct intel_uncore_pmu *pmu;
3606         struct intel_uncore_box *box;
3607         int i, j;
3608
3609         for (i = 0; msr_uncores[i]; i++) {
3610                 type = msr_uncores[i];
3611                 for (j = 0; j < type->num_boxes; j++) {
3612                         pmu = &type->pmus[j];
3613                         if (pmu->func_id < 0)
3614                                 pmu->func_id = j;
3615
3616                         box = uncore_alloc_box(type, cpu_to_node(cpu));
3617                         if (!box)
3618                                 return -ENOMEM;
3619
3620                         box->pmu = pmu;
3621                         box->phys_id = phys_id;
3622                         *per_cpu_ptr(pmu->box, cpu) = box;
3623                 }
3624         }
3625         return 0;
3626 }
3627
3628 static void
3629 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
3630 {
3631         struct intel_uncore_type *type;
3632         struct intel_uncore_pmu *pmu;
3633         struct intel_uncore_box *box;
3634         int i, j;
3635
3636         for (i = 0; uncores[i]; i++) {
3637                 type = uncores[i];
3638                 for (j = 0; j < type->num_boxes; j++) {
3639                         pmu = &type->pmus[j];
3640                         if (old_cpu < 0)
3641                                 box = uncore_pmu_to_box(pmu, new_cpu);
3642                         else
3643                                 box = uncore_pmu_to_box(pmu, old_cpu);
3644                         if (!box)
3645                                 continue;
3646
3647                         if (old_cpu < 0) {
3648                                 WARN_ON_ONCE(box->cpu != -1);
3649                                 box->cpu = new_cpu;
3650                                 continue;
3651                         }
3652
3653                         WARN_ON_ONCE(box->cpu != old_cpu);
3654                         if (new_cpu >= 0) {
3655                                 uncore_pmu_cancel_hrtimer(box);
3656                                 perf_pmu_migrate_context(&pmu->pmu,
3657                                                 old_cpu, new_cpu);
3658                                 box->cpu = new_cpu;
3659                         } else {
3660                                 box->cpu = -1;
3661                         }
3662                 }
3663         }
3664 }
3665
3666 static void uncore_event_exit_cpu(int cpu)
3667 {
3668         int i, phys_id, target;
3669
3670         /* if the exiting cpu is used for collecting uncore events */
3671         if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
3672                 return;
3673
3674         /* find a new cpu to collect uncore events */
3675         phys_id = topology_physical_package_id(cpu);
3676         target = -1;
3677         for_each_online_cpu(i) {
3678                 if (i == cpu)
3679                         continue;
3680                 if (phys_id == topology_physical_package_id(i)) {
3681                         target = i;
3682                         break;
3683                 }
3684         }
3685
3686         /* migrate uncore events to the new cpu */
3687         if (target >= 0)
3688                 cpumask_set_cpu(target, &uncore_cpu_mask);
3689
3690         uncore_change_context(msr_uncores, cpu, target);
3691         uncore_change_context(pci_uncores, cpu, target);
3692 }
3693
3694 static void uncore_event_init_cpu(int cpu)
3695 {
3696         int i, phys_id;
3697
3698         phys_id = topology_physical_package_id(cpu);
3699         for_each_cpu(i, &uncore_cpu_mask) {
3700                 if (phys_id == topology_physical_package_id(i))
3701                         return;
3702         }
3703
3704         cpumask_set_cpu(cpu, &uncore_cpu_mask);
3705
3706         uncore_change_context(msr_uncores, -1, cpu);
3707         uncore_change_context(pci_uncores, -1, cpu);
3708 }
3709
3710 static int uncore_cpu_notifier(struct notifier_block *self,
3711                                unsigned long action, void *hcpu)
3712 {
3713         unsigned int cpu = (long)hcpu;
3714
3715         /* allocate/free data structure for uncore box */
3716         switch (action & ~CPU_TASKS_FROZEN) {
3717         case CPU_UP_PREPARE:
3718                 uncore_cpu_prepare(cpu, -1);
3719                 break;
3720         case CPU_STARTING:
3721                 uncore_cpu_starting(cpu);
3722                 break;
3723         case CPU_UP_CANCELED:
3724         case CPU_DYING:
3725                 uncore_cpu_dying(cpu);
3726                 break;
3727         case CPU_ONLINE:
3728         case CPU_DEAD:
3729                 uncore_kfree_boxes();
3730                 break;
3731         default:
3732                 break;
3733         }
3734
3735         /* select the cpu that collects uncore events */
3736         switch (action & ~CPU_TASKS_FROZEN) {
3737         case CPU_DOWN_FAILED:
3738         case CPU_STARTING:
3739                 uncore_event_init_cpu(cpu);
3740                 break;
3741         case CPU_DOWN_PREPARE:
3742                 uncore_event_exit_cpu(cpu);
3743                 break;
3744         default:
3745                 break;
3746         }
3747
3748         return NOTIFY_OK;
3749 }
3750
3751 static struct notifier_block uncore_cpu_nb = {
3752         .notifier_call  = uncore_cpu_notifier,
3753         /*
3754          * to migrate uncore events, our notifier should be executed
3755          * before perf core's notifier.
3756          */
3757         .priority       = CPU_PRI_PERF + 1,
3758 };
3759
3760 static void __init uncore_cpu_setup(void *dummy)
3761 {
3762         uncore_cpu_starting(smp_processor_id());
3763 }
3764
3765 static int __init uncore_cpu_init(void)
3766 {
3767         int ret, cpu, max_cores;
3768
3769         max_cores = boot_cpu_data.x86_max_cores;
3770         switch (boot_cpu_data.x86_model) {
3771         case 26: /* Nehalem */
3772         case 30:
3773         case 37: /* Westmere */
3774         case 44:
3775                 msr_uncores = nhm_msr_uncores;
3776                 break;
3777         case 42: /* Sandy Bridge */
3778         case 58: /* Ivy Bridge */
3779                 if (snb_uncore_cbox.num_boxes > max_cores)
3780                         snb_uncore_cbox.num_boxes = max_cores;
3781                 msr_uncores = snb_msr_uncores;
3782                 break;
3783         case 45: /* Sandy Bridge-EP */
3784                 if (snbep_uncore_cbox.num_boxes > max_cores)
3785                         snbep_uncore_cbox.num_boxes = max_cores;
3786                 msr_uncores = snbep_msr_uncores;
3787                 break;
3788         case 46: /* Nehalem-EX */
3789                 uncore_nhmex = true;
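                /* fall through */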
3790         case 47: /* Westmere-EX aka. Xeon E7 */
3791                 if (!uncore_nhmex)
3792                         nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
3793                 if (nhmex_uncore_cbox.num_boxes > max_cores)
3794                         nhmex_uncore_cbox.num_boxes = max_cores;
3795                 msr_uncores = nhmex_msr_uncores;
3796                 break;
3797         case 62: /* IvyTown */
3798                 if (ivt_uncore_cbox.num_boxes > max_cores)
3799                         ivt_uncore_cbox.num_boxes = max_cores;
3800                 msr_uncores = ivt_msr_uncores;
3801                 break;
3802
3803         default:
3804                 return 0;
3805         }
3806
3807         ret = uncore_types_init(msr_uncores);
3808         if (ret)
3809                 return ret;
3810
3811         get_online_cpus();
3812
3813         for_each_online_cpu(cpu) {
3814                 int i, phys_id = topology_physical_package_id(cpu);
3815
3816                 for_each_cpu(i, &uncore_cpu_mask) {
3817                         if (phys_id == topology_physical_package_id(i)) {
3818                                 phys_id = -1;
3819                                 break;
3820                         }
3821                 }
3822                 if (phys_id < 0)
3823                         continue;
3824
3825                 uncore_cpu_prepare(cpu, phys_id);
3826                 uncore_event_init_cpu(cpu);
3827         }
3828         on_each_cpu(uncore_cpu_setup, NULL, 1);
3829
3830         register_cpu_notifier(&uncore_cpu_nb);
3831
3832         put_online_cpus();
3833
3834         return 0;
3835 }
3836
3837 static int __init uncore_pmus_register(void)
3838 {
3839         struct intel_uncore_pmu *pmu;
3840         struct intel_uncore_type *type;
3841         int i, j;
3842
3843         for (i = 0; msr_uncores[i]; i++) {
3844                 type = msr_uncores[i];
3845                 for (j = 0; j < type->num_boxes; j++) {
3846                         pmu = &type->pmus[j];
3847                         uncore_pmu_register(pmu);
3848                 }
3849         }
3850
3851         for (i = 0; pci_uncores[i]; i++) {
3852                 type = pci_uncores[i];
3853                 for (j = 0; j < type->num_boxes; j++) {
3854                         pmu = &type->pmus[j];
3855                         uncore_pmu_register(pmu);
3856                 }
3857         }
3858
3859         return 0;
3860 }
3861
3862 static int __init intel_uncore_init(void)
3863 {
3864         int ret;
3865
3866         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
3867                 return -ENODEV;
3868
3869         if (cpu_has_hypervisor)
3870                 return -ENODEV;
3871
3872         ret = uncore_pci_init();
3873         if (ret)
3874                 goto fail;
3875         ret = uncore_cpu_init();
3876         if (ret) {
3877                 uncore_pci_exit();
3878                 goto fail;
3879         }
3880
3881         uncore_pmus_register();
3882         return 0;
3883 fail:
3884         return ret;
3885 }
3886 device_initcall(intel_uncore_init);