drivers/perf/xgene_pmu.c
/*
 * APM X-Gene SoC PMU (Performance Monitor Unit)
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Author: Hoan Tran <hotran@apm.com>
 *         Tai Nguyen <ttnguyen@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#define CSW_CSWCR                       0x0000
#define  CSW_CSWCR_DUALMCB_MASK         BIT(0)
#define MCBADDRMR                       0x0000
#define  MCBADDRMR_DUALMCU_MODE_MASK    BIT(2)

#define PCPPMU_INTSTATUS_REG    0x000
#define PCPPMU_INTMASK_REG      0x004
#define  PCPPMU_INTMASK         0x0000000F
#define  PCPPMU_INTENMASK       0xFFFFFFFF
#define  PCPPMU_INTCLRMASK      0xFFFFFFF0
#define  PCPPMU_INT_MCU         BIT(0)
#define  PCPPMU_INT_MCB         BIT(1)
#define  PCPPMU_INT_L3C         BIT(2)
#define  PCPPMU_INT_IOB         BIT(3)

#define PMU_MAX_COUNTERS        4
#define PMU_CNT_MAX_PERIOD      0x100000000ULL
#define PMU_OVERFLOW_MASK       0xF
#define PMU_PMCR_E              BIT(0)
#define PMU_PMCR_P              BIT(1)

#define PMU_PMEVCNTR0           0x000
#define PMU_PMEVCNTR1           0x004
#define PMU_PMEVCNTR2           0x008
#define PMU_PMEVCNTR3           0x00C
#define PMU_PMEVTYPER0          0x400
#define PMU_PMEVTYPER1          0x404
#define PMU_PMEVTYPER2          0x408
#define PMU_PMEVTYPER3          0x40C
#define PMU_PMAMR0              0xA00
#define PMU_PMAMR1              0xA04
#define PMU_PMCNTENSET          0xC00
#define PMU_PMCNTENCLR          0xC20
#define PMU_PMINTENSET          0xC40
#define PMU_PMINTENCLR          0xC60
#define PMU_PMOVSR              0xC80
#define PMU_PMCR                0xE04

#define to_pmu_dev(p)     container_of(p, struct xgene_pmu_dev, pmu)
#define GET_CNTR(ev)      (ev->hw.idx)
#define GET_EVENTID(ev)   (ev->hw.config & 0xFFULL)
#define GET_AGENTID(ev)   (ev->hw.config_base & 0xFFFFFFFFUL)
#define GET_AGENT1ID(ev)  ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)

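/*
 * Summary of the encoding used by the helpers above: hw.config carries the
 * raw event ID in its low 8 bits, while hw.config_base mirrors
 * perf_event_attr::config1 and carries the agent masks; the upper 32 bits
 * (GET_AGENT1ID) are consumed only by the IOB PMU.
 */
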
struct hw_pmu_info {
        u32 type;
        u32 enable_mask;
        void __iomem *csr;
};

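/*
 * Per-instance PMU state: @inf points at the hardware info (type, CSR base
 * and enable bit) for this instance, @parent is the top-level PCP PMU, and
 * @pmu_counter_event maps each assigned hardware counter index back to its
 * perf_event so the overflow interrupt handler can find it.
 */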
struct xgene_pmu_dev {
        struct hw_pmu_info *inf;
        struct xgene_pmu *parent;
        struct pmu pmu;
        u8 max_counters;
        DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
        u64 max_period;
        const struct attribute_group **attr_groups;
        struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
};

struct xgene_pmu {
        struct device *dev;
        int version;
        void __iomem *pcppmu_csr;
        u32 mcb_active_mask;
        u32 mc_active_mask;
        cpumask_t cpu;
        raw_spinlock_t lock;
        struct list_head l3cpmus;
        struct list_head iobpmus;
        struct list_head mcbpmus;
        struct list_head mcpmus;
};

struct xgene_pmu_dev_ctx {
        char *name;
        struct list_head next;
        struct xgene_pmu_dev *pmu_dev;
        struct hw_pmu_info inf;
};

struct xgene_pmu_data {
        int id;
        u32 data;
};

enum xgene_pmu_version {
        PCP_PMU_V1 = 1,
        PCP_PMU_V2,
};

enum xgene_pmu_dev_type {
        PMU_TYPE_L3C = 0,
        PMU_TYPE_IOB,
        PMU_TYPE_MCB,
        PMU_TYPE_MC,
};

/*
 * sysfs format attributes
 */
static ssize_t xgene_pmu_format_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct dev_ext_attribute *eattr;

        eattr = container_of(attr, struct dev_ext_attribute, attr);
        return sprintf(buf, "%s\n", (char *) eattr->var);
}

#define XGENE_PMU_FORMAT_ATTR(_name, _config)           \
        (&((struct dev_ext_attribute[]) {               \
                { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
                  .var = (void *) _config, }            \
        })[0].attr.attr)

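/*
 * For example, with the attributes below, reading
 * /sys/bus/event_source/devices/l3c0/format/l3c_eventid returns
 * "config:0-7", telling userspace tools which perf_event_attr bits the
 * field occupies.
 */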
static struct attribute *l3c_pmu_format_attrs[] = {
        XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
        XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
        NULL,
};

static struct attribute *iob_pmu_format_attrs[] = {
        XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
        XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
        NULL,
};

static struct attribute *mcb_pmu_format_attrs[] = {
        XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
        XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
        NULL,
};

static struct attribute *mc_pmu_format_attrs[] = {
        XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
        NULL,
};

static const struct attribute_group l3c_pmu_format_attr_group = {
        .name = "format",
        .attrs = l3c_pmu_format_attrs,
};

static const struct attribute_group iob_pmu_format_attr_group = {
        .name = "format",
        .attrs = iob_pmu_format_attrs,
};

static const struct attribute_group mcb_pmu_format_attr_group = {
        .name = "format",
        .attrs = mcb_pmu_format_attrs,
};

static const struct attribute_group mc_pmu_format_attr_group = {
        .name = "format",
        .attrs = mc_pmu_format_attrs,
};

/*
 * sysfs event attributes
 */
static ssize_t xgene_pmu_event_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct dev_ext_attribute *eattr;

        eattr = container_of(attr, struct dev_ext_attribute, attr);
        return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
}

#define XGENE_PMU_EVENT_ATTR(_name, _config)            \
        (&((struct dev_ext_attribute[]) {               \
                { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \
                  .var = (void *) _config, }            \
         })[0].attr.attr)

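/*
 * For example, the "read-hit" attribute below makes
 * /sys/bus/event_source/devices/l3c0/events/read-hit read back as
 * "config=0x2", so a command line such as "perf stat -e l3c0/read-hit/"
 * resolves to event ID 0x02 on the first L3C PMU.
 */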
static struct attribute *l3c_pmu_events_attrs[] = {
        XGENE_PMU_EVENT_ATTR(cycle-count,                       0x00),
        XGENE_PMU_EVENT_ATTR(cycle-count-div-64,                0x01),
        XGENE_PMU_EVENT_ATTR(read-hit,                          0x02),
        XGENE_PMU_EVENT_ATTR(read-miss,                         0x03),
        XGENE_PMU_EVENT_ATTR(write-need-replacement,            0x06),
        XGENE_PMU_EVENT_ATTR(write-not-need-replacement,        0x07),
        XGENE_PMU_EVENT_ATTR(tq-full,                           0x08),
        XGENE_PMU_EVENT_ATTR(ackq-full,                         0x09),
        XGENE_PMU_EVENT_ATTR(wdb-full,                          0x0a),
        XGENE_PMU_EVENT_ATTR(bank-fifo-full,                    0x0b),
        XGENE_PMU_EVENT_ATTR(odb-full,                          0x0c),
        XGENE_PMU_EVENT_ATTR(wbq-full,                          0x0d),
        XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue,          0x0e),
        XGENE_PMU_EVENT_ATTR(bank-fifo-issue,                   0x0f),
        NULL,
};

static struct attribute *iob_pmu_events_attrs[] = {
        XGENE_PMU_EVENT_ATTR(cycle-count,                       0x00),
        XGENE_PMU_EVENT_ATTR(cycle-count-div-64,                0x01),
        XGENE_PMU_EVENT_ATTR(axi0-read,                         0x02),
        XGENE_PMU_EVENT_ATTR(axi0-read-partial,                 0x03),
        XGENE_PMU_EVENT_ATTR(axi1-read,                         0x04),
        XGENE_PMU_EVENT_ATTR(axi1-read-partial,                 0x05),
        XGENE_PMU_EVENT_ATTR(csw-read-block,                    0x06),
        XGENE_PMU_EVENT_ATTR(csw-read-partial,                  0x07),
        XGENE_PMU_EVENT_ATTR(axi0-write,                        0x10),
        XGENE_PMU_EVENT_ATTR(axi0-write-partial,                0x11),
        XGENE_PMU_EVENT_ATTR(axi1-write,                        0x13),
        XGENE_PMU_EVENT_ATTR(axi1-write-partial,                0x14),
        XGENE_PMU_EVENT_ATTR(csw-inbound-dirty,                 0x16),
        NULL,
};

static struct attribute *mcb_pmu_events_attrs[] = {
        XGENE_PMU_EVENT_ATTR(cycle-count,                       0x00),
        XGENE_PMU_EVENT_ATTR(cycle-count-div-64,                0x01),
        XGENE_PMU_EVENT_ATTR(csw-read,                          0x02),
        XGENE_PMU_EVENT_ATTR(csw-write-request,                 0x03),
        XGENE_PMU_EVENT_ATTR(mcb-csw-stall,                     0x04),
        XGENE_PMU_EVENT_ATTR(cancel-read-gack,                  0x05),
        NULL,
};

static struct attribute *mc_pmu_events_attrs[] = {
        XGENE_PMU_EVENT_ATTR(cycle-count,                       0x00),
        XGENE_PMU_EVENT_ATTR(cycle-count-div-64,                0x01),
        XGENE_PMU_EVENT_ATTR(act-cmd-sent,                      0x02),
        XGENE_PMU_EVENT_ATTR(pre-cmd-sent,                      0x03),
        XGENE_PMU_EVENT_ATTR(rd-cmd-sent,                       0x04),
        XGENE_PMU_EVENT_ATTR(rda-cmd-sent,                      0x05),
        XGENE_PMU_EVENT_ATTR(wr-cmd-sent,                       0x06),
        XGENE_PMU_EVENT_ATTR(wra-cmd-sent,                      0x07),
        XGENE_PMU_EVENT_ATTR(pde-cmd-sent,                      0x08),
        XGENE_PMU_EVENT_ATTR(sre-cmd-sent,                      0x09),
        XGENE_PMU_EVENT_ATTR(prea-cmd-sent,                     0x0a),
        XGENE_PMU_EVENT_ATTR(ref-cmd-sent,                      0x0b),
        XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent,                   0x0c),
        XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent,                   0x0d),
        XGENE_PMU_EVENT_ATTR(in-rd-collision,                   0x0e),
        XGENE_PMU_EVENT_ATTR(in-wr-collision,                   0x0f),
        XGENE_PMU_EVENT_ATTR(collision-queue-not-empty,         0x10),
        XGENE_PMU_EVENT_ATTR(collision-queue-full,              0x11),
        XGENE_PMU_EVENT_ATTR(mcu-request,                       0x12),
        XGENE_PMU_EVENT_ATTR(mcu-rd-request,                    0x13),
        XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request,                 0x14),
        XGENE_PMU_EVENT_ATTR(mcu-wr-request,                    0x15),
        XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all,                0x16),
        XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel,             0x17),
        XGENE_PMU_EVENT_ATTR(mcu-rd-response,                   0x18),
        XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all,    0x19),
        XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
        XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all,                0x1b),
        XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel,             0x1c),
        NULL,
};

static const struct attribute_group l3c_pmu_events_attr_group = {
        .name = "events",
        .attrs = l3c_pmu_events_attrs,
};

static const struct attribute_group iob_pmu_events_attr_group = {
        .name = "events",
        .attrs = iob_pmu_events_attrs,
};

static const struct attribute_group mcb_pmu_events_attr_group = {
        .name = "events",
        .attrs = mcb_pmu_events_attrs,
};

static const struct attribute_group mc_pmu_events_attr_group = {
        .name = "events",
        .attrs = mc_pmu_events_attrs,
};

/*
 * sysfs cpumask attributes
 */
static ssize_t xgene_pmu_cpumask_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
}

static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL);

static struct attribute *xgene_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group pmu_cpumask_attr_group = {
        .attrs = xgene_pmu_cpumask_attrs,
};

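/*
 * The "cpumask" attribute follows the usual uncore PMU convention: it
 * advertises the single CPU that events should be opened on, and
 * xgene_perf_event_init() rewrites event->cpu to that CPU so the perf
 * core only drives these counters from one processor.
 */
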
/*
 * Per PMU device attribute groups
 */
static const struct attribute_group *l3c_pmu_attr_groups[] = {
        &l3c_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &l3c_pmu_events_attr_group,
        NULL
};

static const struct attribute_group *iob_pmu_attr_groups[] = {
        &iob_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &iob_pmu_events_attr_group,
        NULL
};

static const struct attribute_group *mcb_pmu_attr_groups[] = {
        &mcb_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &mcb_pmu_events_attr_group,
        NULL
};

static const struct attribute_group *mc_pmu_attr_groups[] = {
        &mc_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &mc_pmu_events_attr_group,
        NULL
};

static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
{
        int cntr;

        cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
                                pmu_dev->max_counters);
        if (cntr == pmu_dev->max_counters)
                return -ENOSPC;
        set_bit(cntr, pmu_dev->cntr_assign_mask);

        return cntr;
}

static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
{
        clear_bit(cntr, pmu_dev->cntr_assign_mask);
}

static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
{
        writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
{
        writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline u32 xgene_pmu_read_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
        return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline void
xgene_pmu_write_counter(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
{
        writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline void
xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
{
        writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
}

static inline void
xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
        writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
}

static inline void
xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
        writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
}

static inline void
xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
        val |= 1 << idx;
        writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
}

static inline void
xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
        val |= 1 << idx;
        writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
}

static inline void
xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
        val |= 1 << idx;
        writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
}

static inline void
xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
        val |= 1 << idx;
        writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
}

static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCR);
        val |= PMU_PMCR_P;
        writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCR);
        val |= PMU_PMCR_E;
        writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCR);
        val &= ~PMU_PMCR_E;
        writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static void xgene_perf_pmu_enable(struct pmu *pmu)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
        int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
                        pmu_dev->max_counters);

        if (!enabled)
                return;

        xgene_pmu_start_counters(pmu_dev);
}

static void xgene_perf_pmu_disable(struct pmu *pmu)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);

        xgene_pmu_stop_counters(pmu_dev);
}

static int xgene_perf_event_init(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        struct perf_event *sibling;

        /* Test the event attr type for PMU enumeration */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * SoC PMU counters are shared across all cores. Therefore,
         * per-process mode is not supported, and neither is event
         * sampling mode.
         */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        /* SoC counters do not have usr/os/guest/host bits */
        if (event->attr.exclude_user || event->attr.exclude_kernel ||
            event->attr.exclude_host || event->attr.exclude_guest)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;
        /*
         * Many perf core operations (e.g. events rotation) operate on a
         * single CPU context. This is obvious for CPU PMUs, where one
         * expects the same sets of events being observed on all CPUs,
         * but can lead to issues for off-core PMUs, where each
         * event could be theoretically assigned to a different CPU. To
         * mitigate this, we enforce CPU assignment to one, selected
         * processor (the one described in the "cpumask" attribute).
         */
        event->cpu = cpumask_first(&pmu_dev->parent->cpu);

        hw->config = event->attr.config;
        /*
         * Each bit of the config1 field represents an agent from which the
         * request of the event comes. The event is counted only if it's
         * caused by a request of an agent that has its bit cleared.
         * By default, the event is counted for all agents.
         */
        hw->config_base = event->attr.config1;

        /*
         * We must NOT create groups containing mixed PMUs, although software
         * events are acceptable
         */
        if (event->group_leader->pmu != event->pmu &&
                        !is_software_event(event->group_leader))
                return -EINVAL;

        list_for_each_entry(sibling, &event->group_leader->sibling_list,
                        group_entry)
                if (sibling->pmu != event->pmu &&
                                !is_software_event(sibling))
                        return -EINVAL;

        return 0;
}

static void xgene_perf_enable_event(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);

        xgene_pmu_write_evttype(pmu_dev, GET_CNTR(event), GET_EVENTID(event));
        xgene_pmu_write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
        if (pmu_dev->inf->type == PMU_TYPE_IOB)
                xgene_pmu_write_agent1msk(pmu_dev, ~((u32)GET_AGENT1ID(event)));

        xgene_pmu_enable_counter(pmu_dev, GET_CNTR(event));
        xgene_pmu_enable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_disable_event(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);

        xgene_pmu_disable_counter(pmu_dev, GET_CNTR(event));
        xgene_pmu_disable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_event_set_period(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        /*
         * The X-Gene PMU counters have a period of 2^32. To account for the
         * possibility of extreme interrupt latency we program for a period
         * of half that. Hopefully we can handle the interrupt before another
         * 2^31 events occur and the counter overtakes its previous value.
         */
        u64 val = 1ULL << 31;

        local64_set(&hw->prev_count, val);
        xgene_pmu_write_counter(pmu_dev, hw->idx, (u32) val);
}

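/*
 * Concretely: each counter is (re)loaded with 0x80000000, so it wraps and
 * raises its overflow interrupt after 2^31 increments, leaving another
 * 2^31 counts of headroom before the value could catch up with prev_count
 * again.
 */
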
static void xgene_perf_event_update(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hw->prev_count);
        new_raw_count = xgene_pmu_read_counter(pmu_dev, GET_CNTR(event));

        if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;

        local64_add(delta, &event->count);
}

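/*
 * Worked example of the wrap-around arithmetic above, with
 * max_period = 0xFFFFFFFF: if prev_raw_count = 0xFFFFFFF0 and the 32-bit
 * counter has wrapped to new_raw_count = 0x10, then
 * (0x10 - 0xFFFFFFF0) & 0xFFFFFFFF = 0x20, i.e. 32 events elapsed.
 */
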
static void xgene_perf_read(struct perf_event *event)
{
        xgene_perf_event_update(event);
}

static void xgene_perf_start(struct perf_event *event, int flags)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
                return;

        WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
        hw->state = 0;

        xgene_perf_event_set_period(event);

        if (flags & PERF_EF_RELOAD) {
                u64 prev_raw_count = local64_read(&hw->prev_count);

                xgene_pmu_write_counter(pmu_dev, GET_CNTR(event),
                                        (u32) prev_raw_count);
        }

        xgene_perf_enable_event(event);
        perf_event_update_userpage(event);
}

static void xgene_perf_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hw = &event->hw;

        if (hw->state & PERF_HES_UPTODATE)
                return;

        xgene_perf_disable_event(event);
        WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
        hw->state |= PERF_HES_STOPPED;

        if (hw->state & PERF_HES_UPTODATE)
                return;

        xgene_perf_read(event);
        hw->state |= PERF_HES_UPTODATE;
}

static int xgene_perf_add(struct perf_event *event, int flags)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        /* Allocate an event counter */
        hw->idx = get_next_avail_cntr(pmu_dev);
        if (hw->idx < 0)
                return -EAGAIN;

        /* Update counter event pointer for interrupt handler */
        pmu_dev->pmu_counter_event[hw->idx] = event;

        if (flags & PERF_EF_START)
                xgene_perf_start(event, PERF_EF_RELOAD);

        return 0;
}

static void xgene_perf_del(struct perf_event *event, int flags)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        xgene_perf_stop(event, PERF_EF_UPDATE);

        /* clear the assigned counter */
        clear_avail_cntr(pmu_dev, GET_CNTR(event));

        perf_event_update_userpage(event);
        pmu_dev->pmu_counter_event[hw->idx] = NULL;
}

static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
{
        struct xgene_pmu *xgene_pmu;

        pmu_dev->max_period = PMU_CNT_MAX_PERIOD - 1;
        /* First version PMU supports only single event counter */
        xgene_pmu = pmu_dev->parent;
        if (xgene_pmu->version == PCP_PMU_V1)
                pmu_dev->max_counters = 1;
        else
                pmu_dev->max_counters = PMU_MAX_COUNTERS;

        /* Perf driver registration */
        pmu_dev->pmu = (struct pmu) {
                .attr_groups    = pmu_dev->attr_groups,
                .task_ctx_nr    = perf_invalid_context,
                .pmu_enable     = xgene_perf_pmu_enable,
                .pmu_disable    = xgene_perf_pmu_disable,
                .event_init     = xgene_perf_event_init,
                .add            = xgene_perf_add,
                .del            = xgene_perf_del,
                .start          = xgene_perf_start,
                .stop           = xgene_perf_stop,
                .read           = xgene_perf_read,
        };

        /* Hardware counter init */
        xgene_pmu_stop_counters(pmu_dev);
        xgene_pmu_reset_counters(pmu_dev);

        return perf_pmu_register(&pmu_dev->pmu, name, -1);
}

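/*
 * Note: .task_ctx_nr = perf_invalid_context marks this as a system-wide
 * (uncore) PMU, so the perf core refuses per-task contexts; together with
 * the checks in xgene_perf_event_init() this keeps all events on the CPU
 * advertised in the "cpumask" attribute.
 */
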
static int
xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
{
        struct device *dev = xgene_pmu->dev;
        struct xgene_pmu_dev *pmu;
        int rc;

        pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
        if (!pmu)
                return -ENOMEM;
        pmu->parent = xgene_pmu;
        pmu->inf = &ctx->inf;
        ctx->pmu_dev = pmu;

        switch (pmu->inf->type) {
        case PMU_TYPE_L3C:
                pmu->attr_groups = l3c_pmu_attr_groups;
                break;
        case PMU_TYPE_IOB:
                pmu->attr_groups = iob_pmu_attr_groups;
                break;
        case PMU_TYPE_MCB:
                if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
                        goto dev_err;
                pmu->attr_groups = mcb_pmu_attr_groups;
                break;
        case PMU_TYPE_MC:
                if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
                        goto dev_err;
                pmu->attr_groups = mc_pmu_attr_groups;
                break;
        default:
                return -EINVAL;
        }

        rc = xgene_init_perf(pmu, ctx->name);
        if (rc) {
                dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
                goto dev_err;
        }

        dev_info(dev, "%s PMU registered\n", ctx->name);

        return rc;

dev_err:
        devm_kfree(dev, pmu);
        return -ENODEV;
}

static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
{
        struct xgene_pmu *xgene_pmu = pmu_dev->parent;
        u32 pmovsr;
        int idx;

        pmovsr = readl(pmu_dev->inf->csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;
        if (!pmovsr)
                return;

        /* Clear interrupt flag */
        if (xgene_pmu->version == PCP_PMU_V1)
                writel(0x0, pmu_dev->inf->csr + PMU_PMOVSR);
        else
                writel(pmovsr, pmu_dev->inf->csr + PMU_PMOVSR);

        for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
                struct perf_event *event = pmu_dev->pmu_counter_event[idx];
                int overflowed = pmovsr & BIT(idx);

                /* Ignore if we don't have an event. */
                if (!event || !overflowed)
                        continue;
                xgene_perf_event_update(event);
                xgene_perf_event_set_period(event);
        }
}

static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
{
        struct xgene_pmu_dev_ctx *ctx;
        struct xgene_pmu *xgene_pmu = dev_id;
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&xgene_pmu->lock, flags);

        /* Get Interrupt PMU source */
        val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
        if (val & PCPPMU_INT_MCU) {
                list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
                        _xgene_pmu_isr(irq, ctx->pmu_dev);
                }
        }
        if (val & PCPPMU_INT_MCB) {
                list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
                        _xgene_pmu_isr(irq, ctx->pmu_dev);
                }
        }
        if (val & PCPPMU_INT_L3C) {
                list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
                        _xgene_pmu_isr(irq, ctx->pmu_dev);
                }
        }
        if (val & PCPPMU_INT_IOB) {
                list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
                        _xgene_pmu_isr(irq, ctx->pmu_dev);
                }
        }

        raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags);

        return IRQ_HANDLED;
}

static int acpi_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
                                         struct platform_device *pdev)
{
        void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
        struct resource *res;
        unsigned int reg;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        csw_csr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(csw_csr)) {
                dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
                return PTR_ERR(csw_csr);
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
        mcba_csr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mcba_csr)) {
                dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
                return PTR_ERR(mcba_csr);
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
        mcbb_csr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mcbb_csr)) {
                dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
                return PTR_ERR(mcbb_csr);
        }

        reg = readl(csw_csr + CSW_CSWCR);
        if (reg & CSW_CSWCR_DUALMCB_MASK) {
                /* Dual MCB active */
                xgene_pmu->mcb_active_mask = 0x3;
                /* Probe all active MC(s) */
                reg = readl(mcbb_csr + MCBADDRMR);
                xgene_pmu->mc_active_mask =
                        (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
        } else {
                /* Single MCB active */
                xgene_pmu->mcb_active_mask = 0x1;
                /* Probe all active MC(s) */
                reg = readl(mcba_csr + MCBADDRMR);
                xgene_pmu->mc_active_mask =
                        (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
        }

        return 0;
}

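/*
 * The masks computed here and in the FDT variant below are one bit per
 * unit: e.g. dual MCB in dual-MCU mode yields mc_active_mask = 0xF
 * (MCUs 0-3), while dual MCB without it yields 0x5 (bits 0 and 2, one MCU
 * per MCB). A PMU instance is only registered if its enable_mask
 * intersects the relevant active mask (see xgene_pmu_dev_add()).
 */
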
static int fdt_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
                                        struct platform_device *pdev)
{
        struct regmap *csw_map, *mcba_map, *mcbb_map;
        struct device_node *np = pdev->dev.of_node;
        unsigned int reg;

        csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
        if (IS_ERR(csw_map)) {
                dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
                return PTR_ERR(csw_map);
        }

        mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
        if (IS_ERR(mcba_map)) {
                dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
                return PTR_ERR(mcba_map);
        }

        mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
        if (IS_ERR(mcbb_map)) {
                dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
                return PTR_ERR(mcbb_map);
        }

        if (regmap_read(csw_map, CSW_CSWCR, &reg))
                return -EINVAL;

        if (reg & CSW_CSWCR_DUALMCB_MASK) {
                /* Dual MCB active */
                xgene_pmu->mcb_active_mask = 0x3;
                /* Probe all active MC(s) */
                if (regmap_read(mcbb_map, MCBADDRMR, &reg))
                        return 0;
                xgene_pmu->mc_active_mask =
                        (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
        } else {
                /* Single MCB active */
                xgene_pmu->mcb_active_mask = 0x1;
                /* Probe all active MC(s) */
                if (regmap_read(mcba_map, MCBADDRMR, &reg))
                        return 0;
                xgene_pmu->mc_active_mask =
                        (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
        }

        return 0;
}

static int xgene_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
                                          struct platform_device *pdev)
{
        if (has_acpi_companion(&pdev->dev))
                return acpi_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
        return fdt_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
}

static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
{
        switch (type) {
        case PMU_TYPE_L3C:
                return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
        case PMU_TYPE_IOB:
                return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
        case PMU_TYPE_MCB:
                return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
        case PMU_TYPE_MC:
                return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
        default:
                return devm_kasprintf(dev, GFP_KERNEL, "unknown");
        }
}

#if defined(CONFIG_ACPI)
static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
{
        struct resource *res = data;

        if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
                acpi_dev_resource_memory(ares, res);

        /* Always tell the ACPI core to skip this resource */
        return 1;
}

static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
                                       struct acpi_device *adev, u32 type)
{
        struct device *dev = xgene_pmu->dev;
        struct list_head resource_list;
        struct xgene_pmu_dev_ctx *ctx;
        const union acpi_object *obj;
        struct hw_pmu_info *inf;
        void __iomem *dev_csr;
        struct resource res;
        int enable_bit;
        int rc;

        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        INIT_LIST_HEAD(&resource_list);
        rc = acpi_dev_get_resources(adev, &resource_list,
                                    acpi_pmu_dev_add_resource, &res);
        acpi_dev_free_resource_list(&resource_list);
        if (rc < 0) {
                dev_err(dev, "PMU type %d: No resource address found\n", type);
                goto err;
        }

        dev_csr = devm_ioremap_resource(dev, &res);
        if (IS_ERR(dev_csr)) {
                dev_err(dev, "PMU type %d: Fail to map resource\n", type);
                goto err;
        }

        /* A PMU device node without enable-bit-index is always enabled */
        rc = acpi_dev_get_property(adev, "enable-bit-index",
                                   ACPI_TYPE_INTEGER, &obj);
        if (rc < 0)
                enable_bit = 0;
        else
                enable_bit = (int) obj->integer.value;

        ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
        if (!ctx->name) {
                dev_err(dev, "PMU type %d: Fail to get device name\n", type);
                goto err;
        }
        inf = &ctx->inf;
        inf->type = type;
        inf->csr = dev_csr;
        inf->enable_mask = 1 << enable_bit;

        return ctx;
err:
        devm_kfree(dev, ctx);
        return NULL;
}

static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
                                    void *data, void **return_value)
{
        struct xgene_pmu *xgene_pmu = data;
        struct xgene_pmu_dev_ctx *ctx;
        struct acpi_device *adev;

        if (acpi_bus_get_device(handle, &adev))
                return AE_OK;
        if (acpi_bus_get_status(adev) || !adev->status.present)
                return AE_OK;

        if (!strcmp(acpi_device_hid(adev), "APMC0D5D"))
                ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_L3C);
        else if (!strcmp(acpi_device_hid(adev), "APMC0D5E"))
                ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_IOB);
        else if (!strcmp(acpi_device_hid(adev), "APMC0D5F"))
                ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MCB);
        else if (!strcmp(acpi_device_hid(adev), "APMC0D60"))
                ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MC);
        else
                ctx = NULL;

        if (!ctx)
                return AE_OK;

        if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
                /* Can't add the PMU device, skip it */
                devm_kfree(xgene_pmu->dev, ctx);
                return AE_OK;
        }

        switch (ctx->inf.type) {
        case PMU_TYPE_L3C:
                list_add(&ctx->next, &xgene_pmu->l3cpmus);
                break;
        case PMU_TYPE_IOB:
                list_add(&ctx->next, &xgene_pmu->iobpmus);
                break;
        case PMU_TYPE_MCB:
                list_add(&ctx->next, &xgene_pmu->mcbpmus);
                break;
        case PMU_TYPE_MC:
                list_add(&ctx->next, &xgene_pmu->mcpmus);
                break;
        }
        return AE_OK;
}

static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
                                  struct platform_device *pdev)
{
        struct device *dev = xgene_pmu->dev;
        acpi_handle handle;
        acpi_status status;

        handle = ACPI_HANDLE(dev);
        if (!handle)
                return -EINVAL;

        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
                                     acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "failed to probe PMU devices\n");
                return -ENODEV;
        }

        return 0;
}
#else
static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
                                  struct platform_device *pdev)
{
        return 0;
}
#endif

static struct
xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
                                      struct device_node *np, u32 type)
{
        struct device *dev = xgene_pmu->dev;
        struct xgene_pmu_dev_ctx *ctx;
        struct hw_pmu_info *inf;
        void __iomem *dev_csr;
        struct resource res;
        u32 enable_bit;
        int rc;

        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;
        rc = of_address_to_resource(np, 0, &res);
        if (rc < 0) {
                dev_err(dev, "PMU type %d: No resource address found\n", type);
                goto err;
        }
        dev_csr = devm_ioremap_resource(dev, &res);
        if (IS_ERR(dev_csr)) {
                dev_err(dev, "PMU type %d: Fail to map resource\n", type);
                goto err;
        }

        /* A PMU device node without enable-bit-index is always enabled */
        if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
                enable_bit = 0;

        ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
        if (!ctx->name) {
                dev_err(dev, "PMU type %d: Fail to get device name\n", type);
                goto err;
        }
        inf = &ctx->inf;
        inf->type = type;
        inf->csr = dev_csr;
        inf->enable_mask = 1 << enable_bit;

        return ctx;
err:
        devm_kfree(dev, ctx);
        return NULL;
}

static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
                                 struct platform_device *pdev)
{
        struct xgene_pmu_dev_ctx *ctx;
        struct device_node *np;

        for_each_child_of_node(pdev->dev.of_node, np) {
                if (!of_device_is_available(np))
                        continue;

                if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
                        ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
                else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
                        ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
                else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
                        ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
                else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
                        ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
                else
                        ctx = NULL;

                if (!ctx)
                        continue;

                if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
                        /* Can't add the PMU device, skip it */
                        devm_kfree(xgene_pmu->dev, ctx);
                        continue;
                }

                switch (ctx->inf.type) {
                case PMU_TYPE_L3C:
                        list_add(&ctx->next, &xgene_pmu->l3cpmus);
                        break;
                case PMU_TYPE_IOB:
                        list_add(&ctx->next, &xgene_pmu->iobpmus);
                        break;
                case PMU_TYPE_MCB:
                        list_add(&ctx->next, &xgene_pmu->mcbpmus);
                        break;
                case PMU_TYPE_MC:
                        list_add(&ctx->next, &xgene_pmu->mcpmus);
                        break;
                }
        }

        return 0;
}

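/*
 * Illustrative sketch (not taken from a shipped DT) of the bindings the
 * code above consumes: a parent node with compatible "apm,xgene-pmu" or
 * "apm,xgene-pmu-v2" plus regmap-csw/regmap-mcba/regmap-mcbb phandles,
 * and child nodes such as:
 *
 *      pmul3c0: pmu-l3c@7e610000 {
 *              compatible = "apm,xgene-pmu-l3c";
 *              reg = <0x0 0x7e610000 0x0 0x1000>;
 *              enable-bit-index = <0>;
 *      };
 *
 * The address and node names here are placeholders; omitting
 * enable-bit-index leaves the instance treated as always enabled.
 */
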
static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
                                   struct platform_device *pdev)
{
        if (has_acpi_companion(&pdev->dev))
                return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
        return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
}

static const struct xgene_pmu_data xgene_pmu_data = {
        .id   = PCP_PMU_V1,
};

static const struct xgene_pmu_data xgene_pmu_v2_data = {
        .id   = PCP_PMU_V2,
};

static const struct of_device_id xgene_pmu_of_match[] = {
        { .compatible   = "apm,xgene-pmu",      .data = &xgene_pmu_data },
        { .compatible   = "apm,xgene-pmu-v2",   .data = &xgene_pmu_v2_data },
        {},
};
MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_pmu_acpi_match[] = {
        {"APMC0D5B", PCP_PMU_V1},
        {"APMC0D5C", PCP_PMU_V2},
        {},
};
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif

static int xgene_pmu_probe(struct platform_device *pdev)
{
        const struct xgene_pmu_data *dev_data;
        const struct of_device_id *of_id;
        struct xgene_pmu *xgene_pmu;
        struct resource *res;
        int irq, rc;
        int version;

        xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
        if (!xgene_pmu)
                return -ENOMEM;
        xgene_pmu->dev = &pdev->dev;
        platform_set_drvdata(pdev, xgene_pmu);

        version = -EINVAL;
        of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
        if (of_id) {
                dev_data = (const struct xgene_pmu_data *) of_id->data;
                version = dev_data->id;
        }

#ifdef CONFIG_ACPI
        if (ACPI_COMPANION(&pdev->dev)) {
                const struct acpi_device_id *acpi_id;

                acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
                if (acpi_id)
                        version = (int) acpi_id->driver_data;
        }
#endif
        if (version < 0)
                return -ENODEV;

        INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
        INIT_LIST_HEAD(&xgene_pmu->iobpmus);
        INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
        INIT_LIST_HEAD(&xgene_pmu->mcpmus);

        xgene_pmu->version = version;
        dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(xgene_pmu->pcppmu_csr)) {
                dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
                rc = PTR_ERR(xgene_pmu->pcppmu_csr);
                goto err;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "No IRQ resource\n");
                rc = -EINVAL;
                goto err;
        }
        rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
                                IRQF_NOBALANCING | IRQF_NO_THREAD,
                                dev_name(&pdev->dev), xgene_pmu);
        if (rc) {
                dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
                goto err;
        }

        raw_spin_lock_init(&xgene_pmu->lock);

        /* Check for active MCBs and MCUs */
        rc = xgene_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
        if (rc) {
                dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
                xgene_pmu->mcb_active_mask = 0x1;
                xgene_pmu->mc_active_mask = 0x1;
        }

        /* Pick one core to use for cpumask attributes */
        cpumask_set_cpu(smp_processor_id(), &xgene_pmu->cpu);

        /* Make sure that the overflow interrupt is handled by this CPU */
        rc = irq_set_affinity(irq, &xgene_pmu->cpu);
        if (rc) {
                dev_err(&pdev->dev, "Failed to set interrupt affinity!\n");
                goto err;
        }

        /* Walk through the tree for all PMU perf devices */
        rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
        if (rc) {
                dev_err(&pdev->dev, "No PMU perf devices found!\n");
                goto err;
        }

        /* Enable interrupt */
        xgene_pmu_unmask_int(xgene_pmu);

        return 0;

err:
        if (xgene_pmu->pcppmu_csr)
                devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
        devm_kfree(&pdev->dev, xgene_pmu);

        return rc;
}

static void
xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
{
        struct xgene_pmu_dev_ctx *ctx;
        struct device *dev = xgene_pmu->dev;
        struct xgene_pmu_dev *pmu_dev;

        list_for_each_entry(ctx, pmus, next) {
                pmu_dev = ctx->pmu_dev;
                if (pmu_dev->inf->csr)
                        devm_iounmap(dev, pmu_dev->inf->csr);
                devm_kfree(dev, ctx);
                devm_kfree(dev, pmu_dev);
        }
}

static int xgene_pmu_remove(struct platform_device *pdev)
{
        struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);

        xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
        xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
        xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
        xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);

        if (xgene_pmu->pcppmu_csr)
                devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
        devm_kfree(&pdev->dev, xgene_pmu);

        return 0;
}

static struct platform_driver xgene_pmu_driver = {
        .probe = xgene_pmu_probe,
        .remove = xgene_pmu_remove,
        .driver = {
                .name           = "xgene-pmu",
                .of_match_table = xgene_pmu_of_match,
                .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
        },
};

builtin_platform_driver(xgene_pmu_driver);