Merge tag 'xtensa-for-next-20141021-2' of git://github.com/jcmvbkbc/linux-xtensa...
[cascardo/linux.git] / arch / x86 / kernel / cpu / perf_event_intel_uncore_snbep.c
1 /* SandyBridge-EP/IvyTown uncore support */
2 #include "perf_event_intel_uncore.h"
3
4
/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)	/* reset box control */
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)	/* reset box counters */
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)	/* freeze counting */
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)	/* allow freezing */
/* value written at box init: reset control and counters, enable freeze */
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
/* threshold field, bits 24-31 ("TRESH" spelling is historical) */
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
/* all config bits a raw event may set on a generic SNB-EP counter */
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000	/* Ubox threshold is only 5 bits */
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_EV_SEL_EXT | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)
59
/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
/* MSR stride between successive Cbo boxes */
#define SNBEP_CBO_MSR_OFFSET			0x20

/* bit fields of the SNB-EP Cbo filter register */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

/*
 * Declare an extra_reg entry routed to the Cbo filter register:
 * @e is the event/umask value to match under @m, @i is the
 * bit-encoded filter-field index used by snbep_cbox_hw_config().
 */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
113
/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* bit fields of the IVBEP Cbo filter register (64-bit wide) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* extract the @i-th @n-bit-wide field of @x */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
165
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x705
#define HSWEP_U_MSR_PMON_CTL0			0x709
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

/* bit fields of the Haswell-EP Ubox filter register */
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
/* MSR stride between successive Haswell-EP Cbo boxes */
#define HSWEP_CBO_MSR_OFFSET			0x10


/* bit fields of the Haswell-EP Cbo filter register (64-bit wide) */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
212
/*
 * Format attributes grouped into the "format" attribute_group below;
 * each declaration (macro from perf_event_intel_uncore.h) maps a named
 * field to a bit range of the perf_event_attr config/config1/config2 words.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
262
263 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
264 {
265         struct pci_dev *pdev = box->pci_dev;
266         int box_ctl = uncore_pci_box_ctl(box);
267         u32 config = 0;
268
269         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
270                 config |= SNBEP_PMON_BOX_CTL_FRZ;
271                 pci_write_config_dword(pdev, box_ctl, config);
272         }
273 }
274
275 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
276 {
277         struct pci_dev *pdev = box->pci_dev;
278         int box_ctl = uncore_pci_box_ctl(box);
279         u32 config = 0;
280
281         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
282                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
283                 pci_write_config_dword(pdev, box_ctl, config);
284         }
285 }
286
287 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
288 {
289         struct pci_dev *pdev = box->pci_dev;
290         struct hw_perf_event *hwc = &event->hw;
291
292         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
293 }
294
295 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
296 {
297         struct pci_dev *pdev = box->pci_dev;
298         struct hw_perf_event *hwc = &event->hw;
299
300         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
301 }
302
303 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
304 {
305         struct pci_dev *pdev = box->pci_dev;
306         struct hw_perf_event *hwc = &event->hw;
307         u64 count = 0;
308
309         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
310         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
311
312         return count;
313 }
314
315 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
316 {
317         struct pci_dev *pdev = box->pci_dev;
318
319         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
320 }
321
322 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
323 {
324         u64 config;
325         unsigned msr;
326
327         msr = uncore_msr_box_ctl(box);
328         if (msr) {
329                 rdmsrl(msr, config);
330                 config |= SNBEP_PMON_BOX_CTL_FRZ;
331                 wrmsrl(msr, config);
332         }
333 }
334
335 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
336 {
337         u64 config;
338         unsigned msr;
339
340         msr = uncore_msr_box_ctl(box);
341         if (msr) {
342                 rdmsrl(msr, config);
343                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
344                 wrmsrl(msr, config);
345         }
346 }
347
348 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
349 {
350         struct hw_perf_event *hwc = &event->hw;
351         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
352
353         if (reg1->idx != EXTRA_REG_NONE)
354                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
355
356         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
357 }
358
359 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
360                                         struct perf_event *event)
361 {
362         struct hw_perf_event *hwc = &event->hw;
363
364         wrmsrl(hwc->config_base, hwc->config);
365 }
366
367 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
368 {
369         unsigned msr = uncore_msr_box_ctl(box);
370
371         if (msr)
372                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
373 }
374
/* generic SNB-EP format fields: 8-bit event/umask/threshold plus flags */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
383
/* Ubox format fields: like the generic set but with a 5-bit threshold */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
392
/* Cbo format fields: generic set plus tid_en and the filter sub-fields */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
406
/* PCU format fields: occupancy selection/edge/invert and four band filters */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
421
/* QPI format fields: generic set plus packet match/mask sub-fields */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
448
/* human-readable aliases for common SNB-EP IMC events */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	{ /* end: all zeroes */ },
};
455
/* human-readable aliases for common SNB-EP QPI events */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
463
/* "format" attribute groups, one per box flavour */
static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
488
/* operations shared by all SNB-EP MSR-based uncore boxes */
#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
500
/*
 * Common operations for PCI-based uncore boxes; enable_event is
 * deliberately left out so each user can supply its own.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
507
508 static struct intel_uncore_ops snbep_uncore_pci_ops = {
509         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
510         .enable_event   = snbep_uncore_pci_enable_event,        \
511 };
512
/*
 * Cbo event scheduling constraints: each entry pairs an event code with
 * the set of counters it may be scheduled on (second argument appears to
 * be a counter bitmask — semantics defined by UNCORE_EVENT_CONSTRAINT in
 * the shared uncore header).
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
542
/* R2PCIe event scheduling constraints (same encoding as the Cbo table) */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
556
/* R3QPI event scheduling constraints (same encoding as the Cbo table) */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
588
/* SNB-EP Ubox PMU: two 44-bit generic counters plus a 48-bit fixed counter */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
603
/*
 * Events that need the Cbo filter register: each entry maps an
 * event/umask value (matched under the given mask) to the bit-encoded
 * filter-field index consumed by snbep_cbox_hw_config().
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
632
633 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
634 {
635         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
636         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
637         int i;
638
639         if (uncore_box_is_fake(box))
640                 return;
641
642         for (i = 0; i < 5; i++) {
643                 if (reg1->alloc & (0x1 << i))
644                         atomic_sub(1 << (i * 6), &er->ref);
645         }
646         reg1->alloc = 0;
647 }
648
/*
 * Try to claim the shared Cbo filter-register fields this event needs.
 * Each of the five filter fields has a 6-bit reference count packed into
 * er->ref; a field may be shared only by events programming identical
 * filter bits for it.  @cbox_filter_mask translates a field-index bit
 * into the corresponding filter-register bit mask.
 *
 * Returns NULL on success (no extra constraint), or
 * &uncore_constraint_empty when a needed field is already held with a
 * conflicting configuration.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	/* no extra register in use -> nothing to claim */
	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* on a real box, skip fields this event already holds */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* claim the field if it is free or programmed identically */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* record allocations only on real boxes; fake boxes are transient */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* roll back the references taken before the conflicting field */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
695
696 static u64 snbep_cbox_filter_mask(int fields)
697 {
698         u64 mask = 0;
699
700         if (fields & 0x1)
701                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
702         if (fields & 0x2)
703                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
704         if (fields & 0x4)
705                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
706         if (fields & 0x8)
707                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
708
709         return mask;
710 }
711
712 static struct event_constraint *
713 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
714 {
715         return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
716 }
717
718 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
719 {
720         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
721         struct extra_reg *er;
722         int idx = 0;
723
724         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
725                 if (er->event != (event->hw.config & er->config_mask))
726                         continue;
727                 idx |= er->idx;
728         }
729
730         if (idx) {
731                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
732                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
733                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
734                 reg1->idx = idx;
735         }
736         return 0;
737 }
738
/* SNB-EP Cbox callbacks: common MSR ops plus shared-filter management. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
745
/*
 * SNB-EP Cbox (LLC coherence engine) uncore PMU.  One shared register
 * holds the box filter; num_boxes is trimmed to the core count at init.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
761
762 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
763 {
764         struct hw_perf_event *hwc = &event->hw;
765         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
766         u64 config = reg1->config;
767
768         if (new_idx > reg1->idx)
769                 config <<= 8 * (new_idx - reg1->idx);
770         else
771                 config >>= 8 * (reg1->idx - new_idx);
772
773         if (modify) {
774                 hwc->config += new_idx - reg1->idx;
775                 reg1->config = config;
776                 reg1->idx = new_idx;
777         }
778         return config;
779 }
780
/*
 * Allocate one of the four 8-bit bands in the shared PCU filter register
 * for @event.  If the requested band is busy with a different value, the
 * other bands are tried in turn (moving the band also adjusts the event
 * select, see snbep_pcu_alter_er()).  Returns NULL on success, or the
 * empty constraint when no band can be used.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or a real event that already owns its band. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/*
	 * Band is free (per-band refcount is zero) or already programmed
	 * with an identical value: take a reference and install our bits.
	 */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Try the next band; give up after wrapping around. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit a band move, if any, and remember the allocation. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
822
823 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
824 {
825         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
826         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
827
828         if (uncore_box_is_fake(box) || !reg1->alloc)
829                 return;
830
831         atomic_sub(1 << (reg1->idx * 8), &er->ref);
832         reg1->alloc = 0;
833 }
834
835 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
836 {
837         struct hw_perf_event *hwc = &event->hw;
838         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
839         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
840
841         if (ev_sel >= 0xb && ev_sel <= 0xe) {
842                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
843                 reg1->idx = ev_sel - 0xb;
844                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
845         }
846         return 0;
847 }
848
/* SNB-EP PCU callbacks: common MSR ops plus band filter management. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
855
/* SNB-EP PCU (power control unit) uncore PMU; one box per package. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
869
/* All MSR-accessed SNB-EP uncore PMUs; NULL-terminated. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
876
877 void snbep_uncore_cpu_init(void)
878 {
879         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
880                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
881         uncore_msr_uncores = snbep_msr_uncores;
882 }
883
/* Indices into uncore_extra_pci_dev[] for the per-port QPI filter devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
};
888
889 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
890 {
891         struct hw_perf_event *hwc = &event->hw;
892         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
893         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
894
895         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
896                 reg1->idx = 0;
897                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
898                 reg1->config = event->attr.config1;
899                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
900                 reg2->config = event->attr.config2;
901         }
902         return 0;
903 }
904
/*
 * Enable a QPI event.  For event 0x38 the 64-bit packet match and mask
 * values are first written (as two 32-bit halves each) into a separate
 * per-port "filter" PCI device, then the counter itself is enabled.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* pmu_idx selects port 0 or port 1 filter device. */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
		/* The filter device may be absent; skip match/mask setup then. */
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
929
/* QPI callbacks: common PCI ops plus match/mask extra-register handling. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
937
/* Register layout and ops shared by all plain SNB-EP PCI uncore boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
945
/* SNB-EP Home Agent uncore PMU (PCI-accessed). */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
953
/* SNB-EP IMC uncore PMU: one box per memory channel, plus a fixed (DCLK) counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
965
/*
 * SNB-EP QPI link-layer uncore PMU; the packet match/mask filters live
 * in companion PCI devices (see snbep_qpi_enable_event()).
 */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
980
981
/* SNB-EP R2PCIe (ring-to-PCIe) uncore PMU. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
990
/* SNB-EP R3QPI (ring-to-QPI) uncore PMU, one box per QPI link. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
999
/* Indices into snbep_pci_uncores[], referenced by the PCI id table. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1007
/* All PCI-accessed SNB-EP uncore PMUs, indexed by the enum above. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1016
/* PCI device ids of the SNB-EP uncore boxes and the QPI filter devices. */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1070
/* Id table only; probing is done by the generic uncore PCI code. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1075
/*
 * build pci bus to socket mapping
 *
 * Walks all UBOX devices (PCI device id @devid), reads each one's node id
 * and the node-id mapping register, and records bus -> physical package id
 * in uncore_pcibus_to_physid[].  Buses without a UBOX inherit the mapping
 * of the next higher bus that has one.
 */
static int snbep_pci2phy_map_init(int devid)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, 0x40, &config);
		if (err)
			break;
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, 0x54, &config);
		if (err)
			break;
		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				uncore_pcibus_to_physid[bus] = i;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		i = -1;
		for (bus = 255; bus >= 0; bus--) {
			if (uncore_pcibus_to_physid[bus] >= 0)
				i = uncore_pcibus_to_physid[bus];
			else
				uncore_pcibus_to_physid[bus] = i;
		}
	}

	/* drop the reference held by the last pci_get_device() on error exit */
	if (ubox_dev)
		pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1132
1133 int snbep_uncore_pci_init(void)
1134 {
1135         int ret = snbep_pci2phy_map_init(0x3ce0);
1136         if (ret)
1137                 return ret;
1138         uncore_pci_uncores = snbep_pci_uncores;
1139         uncore_pci_driver = &snbep_uncore_pci_driver;
1140         return 0;
1141 }
1142 /* end of Sandy Bridge-EP uncore support */
1143
1144 /* IvyTown uncore support */
1145 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1146 {
1147         unsigned msr = uncore_msr_box_ctl(box);
1148         if (msr)
1149                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1150 }
1151
1152 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1153 {
1154         struct pci_dev *pdev = box->pci_dev;
1155
1156         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1157 }
1158
/* MSR box ops shared by IVT uncores; only init_box differs from SNB-EP. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter
1166
/* Plain MSR ops, used by IVT boxes without extra filter handling. */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1170
/* PCI ops for IVT boxes; reuses SNB-EP handlers except init_box. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1179
/* Register layout and ops shared by all plain IVT PCI uncore boxes. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1187
/* Format attributes common to most IVT uncore boxes. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox variant: 5-bit threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox variant: adds the tid/link/state/nid/opc/nc/c6/isoc filter fields. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU variant: occupancy sub-events plus the four frequency-band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI variant: packet match/mask fields used by event 0x38. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1262
/* sysfs "format" directories for the IVT uncore PMUs. */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1287
/* IVT Ubox uncore PMU, with the fixed UCLK counter. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1302
/*
 * Map IVT Cbox event encodings to the filter fields they need; the third
 * argument is the filter-field bitmap consumed by ivbep_cbox_filter_mask().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1343
1344 static u64 ivbep_cbox_filter_mask(int fields)
1345 {
1346         u64 mask = 0;
1347
1348         if (fields & 0x1)
1349                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1350         if (fields & 0x2)
1351                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1352         if (fields & 0x4)
1353                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1354         if (fields & 0x8)
1355                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1356         if (fields & 0x10) {
1357                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1358                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1359                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1360                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1361         }
1362
1363         return mask;
1364 }
1365
1366 static struct event_constraint *
1367 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1368 {
1369         return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1370 }
1371
1372 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1373 {
1374         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1375         struct extra_reg *er;
1376         int idx = 0;
1377
1378         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1379                 if (er->event != (event->hw.config & er->config_mask))
1380                         continue;
1381                 idx |= er->idx;
1382         }
1383
1384         if (idx) {
1385                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1386                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1387                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1388                 reg1->idx = idx;
1389         }
1390         return 0;
1391 }
1392
/* Program the shared Cbox filter (split over two MSRs) and enable the event. */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/*
		 * NOTE(review): the upper half is written to reg + 6, not
		 * reg + 4 — presumably the IVT FILTER1 MSR really is six
		 * addresses above FILTER0; confirm against the uncore
		 * PMU guide before "fixing" this.
		 */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1406
/* IVT Cbox callbacks: IVT init/enable plus SNB-EP shared-filter management. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1418
/* IVT Cbox uncore PMU; num_boxes is trimmed to the core count at init. */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1434
/* IVT PCU callbacks: IVT MSR ops plus the SNB-EP band filter handlers. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1441
/* IVT PCU (power control unit) uncore PMU; one box per package. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1455
/* All MSR-accessed IVT uncore PMUs; NULL-terminated. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1462
1463 void ivbep_uncore_cpu_init(void)
1464 {
1465         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1466                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1467         uncore_msr_uncores = ivbep_msr_uncores;
1468 }
1469
/* IvyTown home agent (HA): 2 boxes, 4 x 48-bit counters, common PCI box layout. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1477
/*
 * IvyTown memory controller (IMC) channels: 8 boxes with 4 generic
 * counters each plus one fixed 48-bit (DCLK) counter per channel.
 */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1489
/*
 * Registers in the IRP box are not laid out at a regular base + stride,
 * so control and counter PCI config offsets are looked up per counter
 * index through these tables instead of being computed.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1493
1494 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1495 {
1496         struct pci_dev *pdev = box->pci_dev;
1497         struct hw_perf_event *hwc = &event->hw;
1498
1499         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1500                                hwc->config | SNBEP_PMON_CTL_EN);
1501 }
1502
1503 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1504 {
1505         struct pci_dev *pdev = box->pci_dev;
1506         struct hw_perf_event *hwc = &event->hw;
1507
1508         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1509 }
1510
1511 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1512 {
1513         struct pci_dev *pdev = box->pci_dev;
1514         struct hw_perf_event *hwc = &event->hw;
1515         u64 count = 0;
1516
1517         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1518         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1519
1520         return count;
1521 }
1522
/* IRP PCI ops: common SNB-EP box control, but event/counter access goes
 * through the IRP-specific handlers because of the irregular offsets. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};
1531
/* IvyTown IRP box: one box, 4 x 48-bit counters; no perf_ctr/event_ctl
 * bases here since the ops use the per-index offset tables. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1542
/* IvyTown QPI ops: SNB-EP PCI handlers plus the QPI match/mask extra-reg
 * constraint management (shared-reg get/put). */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};
1554
/* IvyTown QPI link layer: 3 boxes (ports), 4 x 48-bit counters, one shared reg. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1568
/* IvyTown R2PCIe ring-to-PCIe box: one box, 4 x 44-bit counters, with
 * per-event counter constraints (reuses the SNB-EP table). */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1577
/* IvyTown R3QPI ring-to-QPI boxes: 2 boxes, 3 x 44-bit counters, with
 * per-event counter constraints (reuses the SNB-EP table). */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1586
/* Indices into ivbep_pci_uncores[], carried in pci_device_id driver_data. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
1595
/* PCI-based IvyTown uncore types, indexed by IVBEP_PCI_UNCORE_*. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1605
/*
 * PCI IDs of the IvyTown uncore devices.  driver_data encodes the
 * uncore type index and the box (instance) number; the two filter
 * devices are extra devices used by the QPI match/mask logic rather
 * than PMU boxes of their own.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1691
/* Skeleton PCI driver handed to the generic uncore PCI registration code. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1696
1697 int ivbep_uncore_pci_init(void)
1698 {
1699         int ret = snbep_pci2phy_map_init(0x0e1e);
1700         if (ret)
1701                 return ret;
1702         uncore_pci_uncores = ivbep_pci_uncores;
1703         uncore_pci_driver = &ivbep_uncore_pci_driver;
1704         return 0;
1705 }
1706 /* end of IvyTown uncore support */
1707
1708 /* Haswell-EP uncore support */
/* sysfs format attributes exposed for the Haswell-EP Ubox PMU. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
1719
/* "format" sysfs group for the HSW-EP Ubox. */
static struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
1724
1725 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1726 {
1727         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1728         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
1729         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
1730         reg1->idx = 0;
1731         return 0;
1732 }
1733
/* HSW-EP Ubox ops: common SNB-EP MSR handlers plus the Ubox filter
 * hw_config and generic shared-reg constraint management. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1740
/* HSW-EP Ubox: one box, 2 x 44-bit counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
1756
/* sysfs format attributes for the HSW-EP C-box, including its filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
1773
/* "format" sysfs group for the HSW-EP C-box. */
static struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
1778
/* Per-event counter constraints for the HSW-EP C-box (event code -> counter mask). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
1789
/*
 * HSW-EP C-box extra-register table: maps (event|umask, mask) pairs to
 * the filter-field selector bits (idx) consumed by
 * hswep_cbox_filter_mask()/hswep_cbox_hw_config().
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
1831
1832 static u64 hswep_cbox_filter_mask(int fields)
1833 {
1834         u64 mask = 0;
1835         if (fields & 0x1)
1836                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
1837         if (fields & 0x2)
1838                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1839         if (fields & 0x4)
1840                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1841         if (fields & 0x8)
1842                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
1843         if (fields & 0x10) {
1844                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1845                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
1846                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
1847                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1848         }
1849         return mask;
1850 }
1851
/* Delegate to the shared SNB-EP C-box constraint logic, supplying the
 * HSW-EP filter-field layout. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
1857
1858 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1859 {
1860         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1861         struct extra_reg *er;
1862         int idx = 0;
1863
1864         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
1865                 if (er->event != (event->hw.config & er->config_mask))
1866                         continue;
1867                 idx |= er->idx;
1868         }
1869
1870         if (idx) {
1871                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1872                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1873                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
1874                 reg1->idx = idx;
1875         }
1876         return 0;
1877 }
1878
/*
 * Enable a C-box event.  When the event uses the box filter, the 64-bit
 * filter value is programmed first, split across two consecutive 32-bit
 * filter MSRs, before the counter control register is armed.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/* low dword to FILTER0 (reg1->reg), high dword to reg1->reg + 1 */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1893
/* HSW-EP C-box ops: SNB-EP MSR box control with HSW-EP-specific event
 * enable (filter programming) and filter/constraint management. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1905
/* HSW-EP C-box: up to 18 boxes (clamped to core count at init),
 * 4 x 44-bit counters each, one shared filter register per box. */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
1921
/* sysfs format attributes for the HSW-EP S-box PMU. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
1931
/* "format" sysfs group for the HSW-EP S-box. */
static struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
1936
/* HSW-EP S-box (ring-to-QPI station): 4 boxes, 4 x 44-bit counters,
 * driven by the plain SNB-EP MSR ops. */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
1950
/*
 * Configure the HSW-EP PCU filter: event selects 0xb-0xe take an
 * occupancy-band threshold from attr.config1, delivered through the
 * PCU filter MSR via the event's extra register.  Always succeeds.
 */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/*
		 * NOTE(review): the mask shifts by idx (0-3), not idx * 8,
		 * so the config1 windows of adjacent bands overlap -- confirm
		 * against the HSW-EP PCU filter layout before relying on it.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
1964
/* HSW-EP PCU ops: common SNB-EP MSR handlers with the HSW-EP band-filter
 * hw_config and the SNB-EP PCU constraint management. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1971
/* HSW-EP power control unit: one box, 4 x 48-bit counters, one shared filter reg. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1985
/* NULL-terminated list of the Haswell-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
1993
1994 void hswep_uncore_cpu_init(void)
1995 {
1996         if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1997                 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1998         uncore_msr_uncores = hswep_msr_uncores;
1999 }
2000
/* HSW-EP home agent: 2 boxes, 5 x 48-bit counters, common SNB-EP PCI layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2008
/* Named IMC events exposed in sysfs (clock ticks and CAS read/write counts). */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	{ /* end: all zeroes */ },
};
2015
/* HSW-EP memory controller channels: 8 boxes, 5 generic plus one fixed
 * 48-bit counter per channel. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2027
/* HSW-EP IRP ops: SNB-EP box control, reusing the IvyTown IRP event
 * handlers since the irregular register layout is shared. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};
2036
/* HSW-EP IRP box: one box, 4 x 48-bit counters; no perf_ctr/event_ctl
 * bases because the ops use per-index offset tables. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2047
/* HSW-EP QPI link layer: 3 boxes (ports), 5 x 48-bit counters, one
 * shared reg; reuses the SNB-EP QPI ops and format group. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 5,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2061
/* Per-event counter constraints for the HSW-EP R2PCIe box. */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2083
/* HSW-EP R2PCIe ring-to-PCIe box: one box, 4 x 48-bit counters. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2092
/* Per-event counter constraints for the HSW-EP R3QPI boxes. */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2129
/* HSW-EP R3QPI ring-to-QPI boxes: 3 boxes, 4 x 44-bit counters. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2138
/* Indices into hswep_pci_uncores[], carried in pci_device_id driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2147
/* PCI-based Haswell-EP uncore types, indexed by HSWEP_PCI_UNCORE_*. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2157
2158 static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
2159         { /* Home Agent 0 */
2160                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
2161                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
2162         },
2163         { /* Home Agent 1 */
2164                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
2165                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
2166         },
2167         { /* MC0 Channel 0 */
2168                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
2169                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
2170         },
2171         { /* MC0 Channel 1 */
2172                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
2173                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
2174         },
2175         { /* MC0 Channel 2 */
2176                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
2177                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
2178         },
2179         { /* MC0 Channel 3 */
2180                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
2181                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
2182         },
2183         { /* MC1 Channel 0 */
2184                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
2185                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
2186         },
2187         { /* MC1 Channel 1 */
2188                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
2189                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
2190         },
2191         { /* MC1 Channel 2 */
2192                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
2193                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
2194         },
2195         { /* MC1 Channel 3 */
2196                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
2197                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
2198         },
2199         { /* IRP */
2200                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
2201                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
2202         },
2203         { /* QPI0 Port 0 */
2204                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
2205                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
2206         },
2207         { /* QPI0 Port 1 */
2208                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
2209                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
2210         },
2211         { /* QPI1 Port 2 */
2212                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
2213                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
2214         },
2215         { /* R2PCIe */
2216                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
2217                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
2218         },
2219         { /* R3QPI0 Link 0 */
2220                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
2221                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
2222         },
2223         { /* R3QPI0 Link 1 */
2224                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
2225                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
2226         },
2227         { /* R3QPI1 Link 2 */
2228                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
2229                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
2230         },
2231         { /* QPI Port 0 filter  */
2232                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
2233                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2234                                                    SNBEP_PCI_QPI_PORT0_FILTER),
2235         },
2236         { /* QPI Port 1 filter  */
2237                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
2238                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2239                                                    SNBEP_PCI_QPI_PORT1_FILTER),
2240         },
2241         { /* end: all zeroes */ }
2242 };
2243
2244 static struct pci_driver hswep_uncore_pci_driver = {
2245         .name           = "hswep_uncore",
2246         .id_table       = hswep_uncore_pci_ids,
2247 };
2248
2249 int hswep_uncore_pci_init(void)
2250 {
2251         int ret = snbep_pci2phy_map_init(0x2f1e);
2252         if (ret)
2253                 return ret;
2254         uncore_pci_uncores = hswep_pci_uncores;
2255         uncore_pci_driver = &hswep_uncore_pci_driver;
2256         return 0;
2257 }
2258 /* end of Haswell-EP uncore support */