Merge branch 'irq/urgent' into irq/core
[cascardo/linux.git] / drivers / irqchip / irq-gic-v3.c
1 /*
2  * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #define pr_fmt(fmt)     "GICv3: " fmt
19
20 #include <linux/acpi.h>
21 #include <linux/cpu.h>
22 #include <linux/cpu_pm.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/of.h>
27 #include <linux/of_address.h>
28 #include <linux/of_irq.h>
29 #include <linux/percpu.h>
30 #include <linux/slab.h>
31
32 #include <linux/irqchip.h>
33 #include <linux/irqchip/arm-gic-common.h>
34 #include <linux/irqchip/arm-gic-v3.h>
35 #include <linux/irqchip/irq-partition-percpu.h>
36
37 #include <asm/cputype.h>
38 #include <asm/exception.h>
39 #include <asm/smp_plat.h>
40 #include <asm/virt.h>
41
42 #include "irq-gic-common.h"
43
44 struct redist_region {
45         void __iomem            *redist_base;
46         phys_addr_t             phys_base;
47         bool                    single_redist;
48 };
49
50 struct gic_chip_data {
51         struct fwnode_handle    *fwnode;
52         void __iomem            *dist_base;
53         struct redist_region    *redist_regions;
54         struct rdists           rdists;
55         struct irq_domain       *domain;
56         u64                     redist_stride;
57         u32                     nr_redist_regions;
58         unsigned int            irq_nr;
59         struct partition_desc   *ppi_descs[16];
60 };
61
62 static struct gic_chip_data gic_data __read_mostly;
63 static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
64
65 static struct gic_kvm_info gic_v3_kvm_info;
66
67 #define gic_data_rdist()                (this_cpu_ptr(gic_data.rdists.rdist))
68 #define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
69 #define gic_data_rdist_sgi_base()       (gic_data_rdist_rd_base() + SZ_64K)
70
71 /* Our default, arbitrary priority value. Linux only uses one anyway. */
72 #define DEFAULT_PMR_VALUE       0xf0
73
74 static inline unsigned int gic_irq(struct irq_data *d)
75 {
76         return d->hwirq;
77 }
78
79 static inline int gic_irq_in_rdist(struct irq_data *d)
80 {
81         return gic_irq(d) < 32;
82 }
83
84 static inline void __iomem *gic_dist_base(struct irq_data *d)
85 {
86         if (gic_irq_in_rdist(d))        /* SGI+PPI -> SGI_base for this CPU */
87                 return gic_data_rdist_sgi_base();
88
89         if (d->hwirq <= 1023)           /* SPI -> dist_base */
90                 return gic_data.dist_base;
91
92         return NULL;
93 }
94
95 static void gic_do_wait_for_rwp(void __iomem *base)
96 {
97         u32 count = 1000000;    /* 1s! */
98
99         while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
100                 count--;
101                 if (!count) {
102                         pr_err_ratelimited("RWP timeout, gone fishing\n");
103                         return;
104                 }
105                 cpu_relax();
106                 udelay(1);
107         }
108 }
109
110 /* Wait for completion of a distributor change */
111 static void gic_dist_wait_for_rwp(void)
112 {
113         gic_do_wait_for_rwp(gic_data.dist_base);
114 }
115
116 /* Wait for completion of a redistributor change */
117 static void gic_redist_wait_for_rwp(void)
118 {
119         gic_do_wait_for_rwp(gic_data_rdist_rd_base());
120 }
121
122 #ifdef CONFIG_ARM64
123 static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
124
125 static u64 __maybe_unused gic_read_iar(void)
126 {
127         if (static_branch_unlikely(&is_cavium_thunderx))
128                 return gic_read_iar_cavium_thunderx();
129         else
130                 return gic_read_iar_common();
131 }
132 #endif
133
134 static void gic_enable_redist(bool enable)
135 {
136         void __iomem *rbase;
137         u32 count = 1000000;    /* 1s! */
138         u32 val;
139
140         rbase = gic_data_rdist_rd_base();
141
142         val = readl_relaxed(rbase + GICR_WAKER);
143         if (enable)
144                 /* Wake up this CPU redistributor */
145                 val &= ~GICR_WAKER_ProcessorSleep;
146         else
147                 val |= GICR_WAKER_ProcessorSleep;
148         writel_relaxed(val, rbase + GICR_WAKER);
149
150         if (!enable) {          /* Check that GICR_WAKER is writeable */
151                 val = readl_relaxed(rbase + GICR_WAKER);
152                 if (!(val & GICR_WAKER_ProcessorSleep))
153                         return; /* No PM support in this redistributor */
154         }
155
156         while (count--) {
157                 val = readl_relaxed(rbase + GICR_WAKER);
158                 if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
159                         break;
160                 cpu_relax();
161                 udelay(1);
162         }
163         if (!count)
164                 pr_err_ratelimited("redistributor failed to %s...\n",
165                                    enable ? "wake up" : "sleep");
166 }
167
168 /*
169  * Routines to disable, enable, EOI and route interrupts
170  */
171 static int gic_peek_irq(struct irq_data *d, u32 offset)
172 {
173         u32 mask = 1 << (gic_irq(d) % 32);
174         void __iomem *base;
175
176         if (gic_irq_in_rdist(d))
177                 base = gic_data_rdist_sgi_base();
178         else
179                 base = gic_data.dist_base;
180
181         return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
182 }
183
184 static void gic_poke_irq(struct irq_data *d, u32 offset)
185 {
186         u32 mask = 1 << (gic_irq(d) % 32);
187         void (*rwp_wait)(void);
188         void __iomem *base;
189
190         if (gic_irq_in_rdist(d)) {
191                 base = gic_data_rdist_sgi_base();
192                 rwp_wait = gic_redist_wait_for_rwp;
193         } else {
194                 base = gic_data.dist_base;
195                 rwp_wait = gic_dist_wait_for_rwp;
196         }
197
198         writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
199         rwp_wait();
200 }
201
202 static void gic_mask_irq(struct irq_data *d)
203 {
204         gic_poke_irq(d, GICD_ICENABLER);
205 }
206
207 static void gic_eoimode1_mask_irq(struct irq_data *d)
208 {
209         gic_mask_irq(d);
210         /*
211          * When masking a forwarded interrupt, make sure it is
212          * deactivated as well.
213          *
214          * This ensures that an interrupt that is getting
215          * disabled/masked will not get "stuck", because there is
216          * no one to deactivate it (the guest is being terminated).
217          */
218         if (irqd_is_forwarded_to_vcpu(d))
219                 gic_poke_irq(d, GICD_ICACTIVER);
220 }
221
222 static void gic_unmask_irq(struct irq_data *d)
223 {
224         gic_poke_irq(d, GICD_ISENABLER);
225 }
226
227 static int gic_irq_set_irqchip_state(struct irq_data *d,
228                                      enum irqchip_irq_state which, bool val)
229 {
230         u32 reg;
231
232         if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
233                 return -EINVAL;
234
235         switch (which) {
236         case IRQCHIP_STATE_PENDING:
237                 reg = val ? GICD_ISPENDR : GICD_ICPENDR;
238                 break;
239
240         case IRQCHIP_STATE_ACTIVE:
241                 reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
242                 break;
243
244         case IRQCHIP_STATE_MASKED:
245                 reg = val ? GICD_ICENABLER : GICD_ISENABLER;
246                 break;
247
248         default:
249                 return -EINVAL;
250         }
251
252         gic_poke_irq(d, reg);
253         return 0;
254 }
255
256 static int gic_irq_get_irqchip_state(struct irq_data *d,
257                                      enum irqchip_irq_state which, bool *val)
258 {
259         if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
260                 return -EINVAL;
261
262         switch (which) {
263         case IRQCHIP_STATE_PENDING:
264                 *val = gic_peek_irq(d, GICD_ISPENDR);
265                 break;
266
267         case IRQCHIP_STATE_ACTIVE:
268                 *val = gic_peek_irq(d, GICD_ISACTIVER);
269                 break;
270
271         case IRQCHIP_STATE_MASKED:
272                 *val = !gic_peek_irq(d, GICD_ISENABLER);
273                 break;
274
275         default:
276                 return -EINVAL;
277         }
278
279         return 0;
280 }
281
282 static void gic_eoi_irq(struct irq_data *d)
283 {
284         gic_write_eoir(gic_irq(d));
285 }
286
287 static void gic_eoimode1_eoi_irq(struct irq_data *d)
288 {
289         /*
290          * No need to deactivate an LPI, or an interrupt that
291          * is is getting forwarded to a vcpu.
292          */
293         if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
294                 return;
295         gic_write_dir(gic_irq(d));
296 }
297
298 static int gic_set_type(struct irq_data *d, unsigned int type)
299 {
300         unsigned int irq = gic_irq(d);
301         void (*rwp_wait)(void);
302         void __iomem *base;
303
304         /* Interrupt configuration for SGIs can't be changed */
305         if (irq < 16)
306                 return -EINVAL;
307
308         /* SPIs have restrictions on the supported types */
309         if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
310                          type != IRQ_TYPE_EDGE_RISING)
311                 return -EINVAL;
312
313         if (gic_irq_in_rdist(d)) {
314                 base = gic_data_rdist_sgi_base();
315                 rwp_wait = gic_redist_wait_for_rwp;
316         } else {
317                 base = gic_data.dist_base;
318                 rwp_wait = gic_dist_wait_for_rwp;
319         }
320
321         return gic_configure_irq(irq, type, base, rwp_wait);
322 }
323
324 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
325 {
326         if (vcpu)
327                 irqd_set_forwarded_to_vcpu(d);
328         else
329                 irqd_clr_forwarded_to_vcpu(d);
330         return 0;
331 }
332
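/*
 * Pack an MPIDR into the 64-bit affinity format expected by GICD_IROUTER:
 * Aff3 in bits [39:32], Aff2 in [23:16], Aff1 in [15:8] and Aff0 in [7:0].
 */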
333 static u64 gic_mpidr_to_affinity(unsigned long mpidr)
334 {
335         u64 aff;
336
337         aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
338                MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
339                MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
340                MPIDR_AFFINITY_LEVEL(mpidr, 0));
341
342         return aff;
343 }
344
345 static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
346 {
347         u32 irqnr;
348
349         do {
350                 irqnr = gic_read_iar();
351
352                 if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
353                         int err;
354
355                         if (static_key_true(&supports_deactivate))
356                                 gic_write_eoir(irqnr);
357
358                         err = handle_domain_irq(gic_data.domain, irqnr, regs);
359                         if (err) {
360                                 WARN_ONCE(true, "Unexpected interrupt received!\n");
361                                 if (static_key_true(&supports_deactivate)) {
362                                         if (irqnr < 8192)
363                                                 gic_write_dir(irqnr);
364                                 } else {
365                                         gic_write_eoir(irqnr);
366                                 }
367                         }
368                         continue;
369                 }
370                 if (irqnr < 16) {
371                         gic_write_eoir(irqnr);
372                         if (static_key_true(&supports_deactivate))
373                                 gic_write_dir(irqnr);
374 #ifdef CONFIG_SMP
375                         /*
376                          * Unlike GICv2, we don't need an smp_rmb() here.
377                          * The control dependency from gic_read_iar to
378                          * the ISB in gic_write_eoir is enough to ensure
379                          * that any shared data read by handle_IPI will
380                          * be read after the ACK.
381                          */
382                         handle_IPI(irqnr, regs);
383 #else
384                         WARN_ONCE(true, "Unexpected SGI received!\n");
385 #endif
386                         continue;
387                 }
388         } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
389 }
390
391 static void __init gic_dist_init(void)
392 {
393         unsigned int i;
394         u64 affinity;
395         void __iomem *base = gic_data.dist_base;
396
397         /* Disable the distributor */
398         writel_relaxed(0, base + GICD_CTLR);
399         gic_dist_wait_for_rwp();
400
401         /*
402          * Configure SPIs as non-secure Group-1. This will only matter
403          * if the GIC only has a single security state. This will not
404          * do the right thing if the kernel is running in secure mode,
405          * but that's not the intended use case anyway.
406          */
407         for (i = 32; i < gic_data.irq_nr; i += 32)
408                 writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
409
410         gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
411
412         /* Enable distributor with ARE, Group1 */
413         writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
414                        base + GICD_CTLR);
415
416         /*
417          * Set all global interrupts to the boot CPU only. ARE must be
418          * enabled.
419          */
420         affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
421         for (i = 32; i < gic_data.irq_nr; i++)
422                 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
423 }
424
425 static int gic_populate_rdist(void)
426 {
427         unsigned long mpidr = cpu_logical_map(smp_processor_id());
428         u64 typer;
429         u32 aff;
430         int i;
431
432         /*
433          * Convert affinity to a 32-bit value that can be matched to
434          * GICR_TYPER bits [63:32].
435          */
436         aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
437                MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
438                MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
439                MPIDR_AFFINITY_LEVEL(mpidr, 0));
440
441         for (i = 0; i < gic_data.nr_redist_regions; i++) {
442                 void __iomem *ptr = gic_data.redist_regions[i].redist_base;
443                 u32 reg;
444
445                 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
446                 if (reg != GIC_PIDR2_ARCH_GICv3 &&
447                     reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
448                         pr_warn("No redistributor present @%p\n", ptr);
449                         break;
450                 }
451
452                 do {
453                         typer = gic_read_typer(ptr + GICR_TYPER);
454                         if ((typer >> 32) == aff) {
455                                 u64 offset = ptr - gic_data.redist_regions[i].redist_base;
456                                 gic_data_rdist_rd_base() = ptr;
457                                 gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
458                                 pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
459                                         smp_processor_id(), mpidr, i,
460                                         &gic_data_rdist()->phys_base);
461                                 return 0;
462                         }
463
464                         if (gic_data.redist_regions[i].single_redist)
465                                 break;
466
467                         if (gic_data.redist_stride) {
468                                 ptr += gic_data.redist_stride;
469                         } else {
470                                 ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
471                                 if (typer & GICR_TYPER_VLPIS)
472                                         ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
473                         }
474                 } while (!(typer & GICR_TYPER_LAST));
475         }
476
477         /* We couldn't even deal with ourselves... */
478         WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
479              smp_processor_id(), mpidr);
480         return -ENODEV;
481 }
482
483 static void gic_cpu_sys_reg_init(void)
484 {
485         /*
486          * Need to check that the SRE bit has actually been set. If
487          * not, it means that SRE is disabled at EL2. We're going to
488          * die painfully, and there is nothing we can do about it.
489          *
490          * Kindly inform the luser.
491          */
492         if (!gic_enable_sre())
493                 pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
494
495         /* Set priority mask register */
496         gic_write_pmr(DEFAULT_PMR_VALUE);
497
498         /*
499          * Some firmwares hand over to the kernel with the BPR changed from
500          * its reset value (and with a value large enough to prevent
501          * any pre-emptive interrupts from working at all). Writing a zero
502          * to BPR restores its reset value.
503          */
504         gic_write_bpr1(0);
505
506         if (static_key_true(&supports_deactivate)) {
507                 /* EOI drops priority only (mode 1) */
508                 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
509         } else {
510                 /* EOI deactivates interrupt too (mode 0) */
511                 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
512         }
513
514         /* ... and let's hit the road... */
515         gic_write_grpen1(1);
516 }
517
518 static int gic_dist_supports_lpis(void)
519 {
520         return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
521 }
522
523 static void gic_cpu_init(void)
524 {
525         void __iomem *rbase;
526
527         /* Register ourselves with the rest of the world */
528         if (gic_populate_rdist())
529                 return;
530
531         gic_enable_redist(true);
532
533         rbase = gic_data_rdist_sgi_base();
534
535         /* Configure SGIs/PPIs as non-secure Group-1 */
536         writel_relaxed(~0, rbase + GICR_IGROUPR0);
537
538         gic_cpu_config(rbase, gic_redist_wait_for_rwp);
539
540         /* Give LPIs a spin */
541         if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
542                 its_cpu_init();
543
544         /* initialise system registers */
545         gic_cpu_sys_reg_init();
546 }
547
548 #ifdef CONFIG_SMP
549
550 static int gic_starting_cpu(unsigned int cpu)
551 {
552         gic_cpu_init();
553         return 0;
554 }
555
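/*
 * Build the 16-bit SGI target list for the CPUs in @mask that belong to
 * the cluster identified by @cluster_id (an MPIDR with Aff0 masked out),
 * starting at *base_cpu. *base_cpu is advanced so that the caller's
 * for_each_cpu() loop continues with the next cluster (or terminates
 * once the mask is exhausted).
 */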
556 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
557                                    unsigned long cluster_id)
558 {
559         int next_cpu, cpu = *base_cpu;
560         unsigned long mpidr = cpu_logical_map(cpu);
561         u16 tlist = 0;
562
563         while (cpu < nr_cpu_ids) {
564                 /*
565                  * If we ever get a cluster of more than 16 CPUs, just
566                  * scream and skip that CPU.
567                  */
568                 if (WARN_ON((mpidr & 0xff) >= 16))
569                         goto out;
570
571                 tlist |= 1 << (mpidr & 0xf);
572
573                 next_cpu = cpumask_next(cpu, mask);
574                 if (next_cpu >= nr_cpu_ids)
575                         goto out;
576                 cpu = next_cpu;
577
578                 mpidr = cpu_logical_map(cpu);
579
580                 if (cluster_id != (mpidr & ~0xffUL)) {
581                         cpu--;
582                         goto out;
583                 }
584         }
585 out:
586         *base_cpu = cpu;
587         return tlist;
588 }
589
590 #define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
591         (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
592                 << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
593
594 static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
595 {
596         u64 val;
597
598         val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)     |
599                MPIDR_TO_SGI_AFFINITY(cluster_id, 2)     |
600                irq << ICC_SGI1R_SGI_ID_SHIFT            |
601                MPIDR_TO_SGI_AFFINITY(cluster_id, 1)     |
602                tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
603
604         pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
605         gic_write_sgi1r(val);
606 }
607
608 static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
609 {
610         int cpu;
611
612         if (WARN_ON(irq >= 16))
613                 return;
614
615         /*
616          * Ensure that stores to Normal memory are visible to the
617          * other CPUs before issuing the IPI.
618          */
619         smp_wmb();
620
621         for_each_cpu(cpu, mask) {
622                 unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
623                 u16 tlist;
624
625                 tlist = gic_compute_target_list(&cpu, mask, cluster_id);
626                 gic_send_sgi(cluster_id, tlist, irq);
627         }
628
629         /* Force the above writes to ICC_SGI1R_EL1 to be executed */
630         isb();
631 }
632
633 static void gic_smp_init(void)
634 {
635         set_smp_cross_call(gic_raise_softirq);
636         cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
637                                   "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
638                                   NULL);
639 }
640
641 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
642                             bool force)
643 {
644         unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
645         void __iomem *reg;
646         int enabled;
647         u64 val;
648
649         if (gic_irq_in_rdist(d))
650                 return -EINVAL;
651
652         /* If interrupt was enabled, disable it first */
653         enabled = gic_peek_irq(d, GICD_ISENABLER);
654         if (enabled)
655                 gic_mask_irq(d);
656
657         reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
658         val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
659
660         gic_write_irouter(val, reg);
661
662         /*
663          * If the interrupt was enabled, enable it again. Otherwise,
664          * just wait for the distributor to have digested our changes.
665          */
666         if (enabled)
667                 gic_unmask_irq(d);
668         else
669                 gic_dist_wait_for_rwp();
670
671         return IRQ_SET_MASK_OK_DONE;
672 }
673 #else
674 #define gic_set_affinity        NULL
675 #define gic_smp_init()          do { } while(0)
676 #endif
677
678 #ifdef CONFIG_CPU_PM
679 /* Check whether the distributor provides a single security state view */
680 static bool gic_dist_security_disabled(void)
681 {
682         return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
683 }
684
685 static int gic_cpu_pm_notifier(struct notifier_block *self,
686                                unsigned long cmd, void *v)
687 {
688         if (cmd == CPU_PM_EXIT) {
689                 if (gic_dist_security_disabled())
690                         gic_enable_redist(true);
691                 gic_cpu_sys_reg_init();
692         } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
693                 gic_write_grpen1(0);
694                 gic_enable_redist(false);
695         }
696         return NOTIFY_OK;
697 }
698
699 static struct notifier_block gic_cpu_pm_notifier_block = {
700         .notifier_call = gic_cpu_pm_notifier,
701 };
702
703 static void gic_cpu_pm_init(void)
704 {
705         cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
706 }
707
708 #else
709 static inline void gic_cpu_pm_init(void) { }
710 #endif /* CONFIG_CPU_PM */
711
712 static struct irq_chip gic_chip = {
713         .name                   = "GICv3",
714         .irq_mask               = gic_mask_irq,
715         .irq_unmask             = gic_unmask_irq,
716         .irq_eoi                = gic_eoi_irq,
717         .irq_set_type           = gic_set_type,
718         .irq_set_affinity       = gic_set_affinity,
719         .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
720         .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
721         .flags                  = IRQCHIP_SET_TYPE_MASKED,
722 };
723
724 static struct irq_chip gic_eoimode1_chip = {
725         .name                   = "GICv3",
726         .irq_mask               = gic_eoimode1_mask_irq,
727         .irq_unmask             = gic_unmask_irq,
728         .irq_eoi                = gic_eoimode1_eoi_irq,
729         .irq_set_type           = gic_set_type,
730         .irq_set_affinity       = gic_set_affinity,
731         .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
732         .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
733         .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
734         .flags                  = IRQCHIP_SET_TYPE_MASKED,
735 };
736
737 #define GIC_ID_NR               (1U << gic_data.rdists.id_bits)
738
739 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
740                               irq_hw_number_t hw)
741 {
742         struct irq_chip *chip = &gic_chip;
743
744         if (static_key_true(&supports_deactivate))
745                 chip = &gic_eoimode1_chip;
746
747         /* SGIs are private to the core kernel */
748         if (hw < 16)
749                 return -EPERM;
750         /* Nothing here */
751         if (hw >= gic_data.irq_nr && hw < 8192)
752                 return -EPERM;
753         /* Off limits */
754         if (hw >= GIC_ID_NR)
755                 return -EPERM;
756
757         /* PPIs */
758         if (hw < 32) {
759                 irq_set_percpu_devid(irq);
760                 irq_domain_set_info(d, irq, hw, chip, d->host_data,
761                                     handle_percpu_devid_irq, NULL, NULL);
762                 irq_set_status_flags(irq, IRQ_NOAUTOEN);
763         }
764         /* SPIs */
765         if (hw >= 32 && hw < gic_data.irq_nr) {
766                 irq_domain_set_info(d, irq, hw, chip, d->host_data,
767                                     handle_fasteoi_irq, NULL, NULL);
768                 irq_set_probe(irq);
769         }
770         /* LPIs */
771         if (hw >= 8192 && hw < GIC_ID_NR) {
772                 if (!gic_dist_supports_lpis())
773                         return -EPERM;
774                 irq_domain_set_info(d, irq, hw, chip, d->host_data,
775                                     handle_fasteoi_irq, NULL, NULL);
776         }
777
778         return 0;
779 }
780
781 static int gic_irq_domain_translate(struct irq_domain *d,
782                                     struct irq_fwspec *fwspec,
783                                     unsigned long *hwirq,
784                                     unsigned int *type)
785 {
786         if (is_of_node(fwspec->fwnode)) {
787                 if (fwspec->param_count < 3)
788                         return -EINVAL;
789
790                 switch (fwspec->param[0]) {
791                 case 0:                 /* SPI */
792                         *hwirq = fwspec->param[1] + 32;
793                         break;
794                 case 1:                 /* PPI */
795                         *hwirq = fwspec->param[1] + 16;
796                         break;
797                 case GIC_IRQ_TYPE_LPI:  /* LPI */
798                         *hwirq = fwspec->param[1];
799                         break;
800                 default:
801                         return -EINVAL;
802                 }
803
804                 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
805                 return 0;
806         }
807
808         if (is_fwnode_irqchip(fwspec->fwnode)) {
809                 if (fwspec->param_count != 2)
810                         return -EINVAL;
811
812                 *hwirq = fwspec->param[0];
813                 *type = fwspec->param[1];
814                 return 0;
815         }
816
817         return -EINVAL;
818 }
819
820 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
821                                 unsigned int nr_irqs, void *arg)
822 {
823         int i, ret;
824         irq_hw_number_t hwirq;
825         unsigned int type = IRQ_TYPE_NONE;
826         struct irq_fwspec *fwspec = arg;
827
828         ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
829         if (ret)
830                 return ret;
831
832         for (i = 0; i < nr_irqs; i++)
833                 gic_irq_domain_map(domain, virq + i, hwirq + i);
834
835         return 0;
836 }
837
838 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
839                                 unsigned int nr_irqs)
840 {
841         int i;
842
843         for (i = 0; i < nr_irqs; i++) {
844                 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
845                 irq_set_handler(virq + i, NULL);
846                 irq_domain_reset_irq_data(d);
847         }
848 }
849
850 static int gic_irq_domain_select(struct irq_domain *d,
851                                  struct irq_fwspec *fwspec,
852                                  enum irq_domain_bus_token bus_token)
853 {
854         /* Not for us */
855         if (fwspec->fwnode != d->fwnode)
856                 return 0;
857
858         /* If this is not DT, then we have a single domain */
859         if (!is_of_node(fwspec->fwnode))
860                 return 1;
861
862         /*
863          * If this is a PPI and we have a 4th (non-null) parameter,
864          * then we need to match the partition domain.
865          */
866         if (fwspec->param_count >= 4 &&
867             fwspec->param[0] == 1 && fwspec->param[3] != 0)
868                 return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
869
870         return d == gic_data.domain;
871 }
872
873 static const struct irq_domain_ops gic_irq_domain_ops = {
874         .translate = gic_irq_domain_translate,
875         .alloc = gic_irq_domain_alloc,
876         .free = gic_irq_domain_free,
877         .select = gic_irq_domain_select,
878 };
879
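/*
 * Translate a partitioned PPI fwspec: param[1] selects the PPI, param[3]
 * carries the phandle of the ppi-partitions child node describing the
 * CPU subset, and the hwirq is the partition id returned by
 * partition_translate_id() for that node.
 */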
880 static int partition_domain_translate(struct irq_domain *d,
881                                       struct irq_fwspec *fwspec,
882                                       unsigned long *hwirq,
883                                       unsigned int *type)
884 {
885         struct device_node *np;
886         int ret;
887
888         np = of_find_node_by_phandle(fwspec->param[3]);
889         if (WARN_ON(!np))
890                 return -EINVAL;
891
892         ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
893                                      of_node_to_fwnode(np));
894         if (ret < 0)
895                 return ret;
896
897         *hwirq = ret;
898         *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
899
900         return 0;
901 }
902
903 static const struct irq_domain_ops partition_domain_ops = {
904         .translate = partition_domain_translate,
905         .select = gic_irq_domain_select,
906 };
907
908 static void gicv3_enable_quirks(void)
909 {
910 #ifdef CONFIG_ARM64
911         if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
912                 static_branch_enable(&is_cavium_thunderx);
913 #endif
914 }
915
916 static int __init gic_init_bases(void __iomem *dist_base,
917                                  struct redist_region *rdist_regs,
918                                  u32 nr_redist_regions,
919                                  u64 redist_stride,
920                                  struct fwnode_handle *handle)
921 {
922         u32 typer;
923         int gic_irqs;
924         int err;
925
926         if (!is_hyp_mode_available())
927                 static_key_slow_dec(&supports_deactivate);
928
929         if (static_key_true(&supports_deactivate))
930                 pr_info("GIC: Using split EOI/Deactivate mode\n");
931
932         gic_data.fwnode = handle;
933         gic_data.dist_base = dist_base;
934         gic_data.redist_regions = rdist_regs;
935         gic_data.nr_redist_regions = nr_redist_regions;
936         gic_data.redist_stride = redist_stride;
937
938         gicv3_enable_quirks();
939
940         /*
941          * Find out how many interrupts are supported.
942          * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
943          */
944         typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
945         gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
946         gic_irqs = GICD_TYPER_IRQS(typer);
947         if (gic_irqs > 1020)
948                 gic_irqs = 1020;
949         gic_data.irq_nr = gic_irqs;
950
951         gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
952                                                  &gic_data);
953         gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
954
955         if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
956                 err = -ENOMEM;
957                 goto out_free;
958         }
959
960         set_handle_irq(gic_handle_irq);
961
962         if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
963                 its_init(handle, &gic_data.rdists, gic_data.domain);
964
965         gic_smp_init();
966         gic_dist_init();
967         gic_cpu_init();
968         gic_cpu_pm_init();
969
970         return 0;
971
972 out_free:
973         if (gic_data.domain)
974                 irq_domain_remove(gic_data.domain);
975         free_percpu(gic_data.rdists.rdist);
976         return err;
977 }
978
979 static int __init gic_validate_dist_version(void __iomem *dist_base)
980 {
981         u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
982
983         if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
984                 return -ENODEV;
985
986         return 0;
987 }
988
989 static int get_cpu_number(struct device_node *dn)
990 {
991         const __be32 *cell;
992         u64 hwid;
993         int i;
994
995         cell = of_get_property(dn, "reg", NULL);
996         if (!cell)
997                 return -1;
998
999         hwid = of_read_number(cell, of_n_addr_cells(dn));
1000
1001         /*
1002          * Non-affinity bits must be set to 0 in the DT
1003          */
1004         if (hwid & ~MPIDR_HWID_BITMASK)
1005                 return -1;
1006
1007         for (i = 0; i < num_possible_cpus(); i++)
1008                 if (cpu_logical_map(i) == hwid)
1009                         return i;
1010
1011         return -1;
1012 }
1013
1014 /* Create all possible partitions at boot time */
1015 static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1016 {
1017         struct device_node *parts_node, *child_part;
1018         int part_idx = 0, i;
1019         int nr_parts;
1020         struct partition_affinity *parts;
1021
1022         parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
1023         if (!parts_node)
1024                 return;
1025
1026         nr_parts = of_get_child_count(parts_node);
1027
1028         if (!nr_parts)
1029                 return;
1030
1031         parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
1032         if (WARN_ON(!parts))
1033                 return;
1034
1035         for_each_child_of_node(parts_node, child_part) {
1036                 struct partition_affinity *part;
1037                 int n;
1038
1039                 part = &parts[part_idx];
1040
1041                 part->partition_id = of_node_to_fwnode(child_part);
1042
1043                 pr_info("GIC: PPI partition %s[%d] { ",
1044                         child_part->name, part_idx);
1045
1046                 n = of_property_count_elems_of_size(child_part, "affinity",
1047                                                     sizeof(u32));
1048                 WARN_ON(n <= 0);
1049
1050                 for (i = 0; i < n; i++) {
1051                         int err, cpu;
1052                         u32 cpu_phandle;
1053                         struct device_node *cpu_node;
1054
1055                         err = of_property_read_u32_index(child_part, "affinity",
1056                                                          i, &cpu_phandle);
1057                         if (WARN_ON(err))
1058                                 continue;
1059
1060                         cpu_node = of_find_node_by_phandle(cpu_phandle);
1061                         if (WARN_ON(!cpu_node))
1062                                 continue;
1063
1064                         cpu = get_cpu_number(cpu_node);
1065                         if (WARN_ON(cpu == -1))
1066                                 continue;
1067
1068                         pr_cont("%s[%d] ", cpu_node->full_name, cpu);
1069
1070                         cpumask_set_cpu(cpu, &part->mask);
1071                 }
1072
1073                 pr_cont("}\n");
1074                 part_idx++;
1075         }
1076
1077         for (i = 0; i < 16; i++) {
1078                 unsigned int irq;
1079                 struct partition_desc *desc;
1080                 struct irq_fwspec ppi_fwspec = {
1081                         .fwnode         = gic_data.fwnode,
1082                         .param_count    = 3,
1083                         .param          = {
1084                                 [0]     = 1,
1085                                 [1]     = i,
1086                                 [2]     = IRQ_TYPE_NONE,
1087                         },
1088                 };
1089
1090                 irq = irq_create_fwspec_mapping(&ppi_fwspec);
1091                 if (WARN_ON(!irq))
1092                         continue;
1093                 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
1094                                              irq, &partition_domain_ops);
1095                 if (WARN_ON(!desc))
1096                         continue;
1097
1098                 gic_data.ppi_descs[i] = desc;
1099         }
1100 }
1101
1102 static void __init gic_of_setup_kvm_info(struct device_node *node)
1103 {
1104         int ret;
1105         struct resource r;
1106         u32 gicv_idx;
1107
1108         gic_v3_kvm_info.type = GIC_V3;
1109
1110         gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
1111         if (!gic_v3_kvm_info.maint_irq)
1112                 return;
1113
1114         if (of_property_read_u32(node, "#redistributor-regions",
1115                                  &gicv_idx))
1116                 gicv_idx = 1;
1117
1118         gicv_idx += 3;  /* Also skip GICD, GICC, GICH */
1119         ret = of_address_to_resource(node, gicv_idx, &r);
1120         if (!ret)
1121                 gic_v3_kvm_info.vcpu = r;
1122
1123         gic_set_kvm_info(&gic_v3_kvm_info);
1124 }
1125
1126 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
1127 {
1128         void __iomem *dist_base;
1129         struct redist_region *rdist_regs;
1130         u64 redist_stride;
1131         u32 nr_redist_regions;
1132         int err, i;
1133
1134         dist_base = of_iomap(node, 0);
1135         if (!dist_base) {
1136                 pr_err("%s: unable to map gic dist registers\n",
1137                         node->full_name);
1138                 return -ENXIO;
1139         }
1140
1141         err = gic_validate_dist_version(dist_base);
1142         if (err) {
1143                 pr_err("%s: no distributor detected, giving up\n",
1144                         node->full_name);
1145                 goto out_unmap_dist;
1146         }
1147
1148         if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
1149                 nr_redist_regions = 1;
1150
1151         rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
1152         if (!rdist_regs) {
1153                 err = -ENOMEM;
1154                 goto out_unmap_dist;
1155         }
1156
1157         for (i = 0; i < nr_redist_regions; i++) {
1158                 struct resource res;
1159                 int ret;
1160
1161                 ret = of_address_to_resource(node, 1 + i, &res);
1162                 rdist_regs[i].redist_base = of_iomap(node, 1 + i);
1163                 if (ret || !rdist_regs[i].redist_base) {
1164                         pr_err("%s: couldn't map region %d\n",
1165                                node->full_name, i);
1166                         err = -ENODEV;
1167                         goto out_unmap_rdist;
1168                 }
1169                 rdist_regs[i].phys_base = res.start;
1170         }
1171
1172         if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
1173                 redist_stride = 0;
1174
1175         err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
1176                              redist_stride, &node->fwnode);
1177         if (err)
1178                 goto out_unmap_rdist;
1179
1180         gic_populate_ppi_partitions(node);
1181         gic_of_setup_kvm_info(node);
1182         return 0;
1183
1184 out_unmap_rdist:
1185         for (i = 0; i < nr_redist_regions; i++)
1186                 if (rdist_regs[i].redist_base)
1187                         iounmap(rdist_regs[i].redist_base);
1188         kfree(rdist_regs);
1189 out_unmap_dist:
1190         iounmap(dist_base);
1191         return err;
1192 }
1193
1194 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
1195
1196 #ifdef CONFIG_ACPI
1197 static struct
1198 {
1199         void __iomem *dist_base;
1200         struct redist_region *redist_regs;
1201         u32 nr_redist_regions;
1202         bool single_redist;
1203         u32 maint_irq;
1204         int maint_irq_mode;
1205         phys_addr_t vcpu_base;
1206 } acpi_data __initdata;
1207
1208 static void __init
1209 gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
1210 {
1211         static int count = 0;
1212
1213         acpi_data.redist_regs[count].phys_base = phys_base;
1214         acpi_data.redist_regs[count].redist_base = redist_base;
1215         acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
1216         count++;
1217 }
1218
1219 static int __init
1220 gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
1221                            const unsigned long end)
1222 {
1223         struct acpi_madt_generic_redistributor *redist =
1224                         (struct acpi_madt_generic_redistributor *)header;
1225         void __iomem *redist_base;
1226
1227         redist_base = ioremap(redist->base_address, redist->length);
1228         if (!redist_base) {
1229                 pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
1230                 return -ENOMEM;
1231         }
1232
1233         gic_acpi_register_redist(redist->base_address, redist_base);
1234         return 0;
1235 }
1236
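/*
 * GICC subtables provide the GICR base address directly; map two 64K
 * frames (RD_base + SGI_base) for GICv3, or four for GICv4, which adds
 * the VLPI_base and reserved frames.
 */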
1237 static int __init
1238 gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
1239                          const unsigned long end)
1240 {
1241         struct acpi_madt_generic_interrupt *gicc =
1242                                 (struct acpi_madt_generic_interrupt *)header;
1243         u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1244         u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
1245         void __iomem *redist_base;
1246
1247         redist_base = ioremap(gicc->gicr_base_address, size);
1248         if (!redist_base)
1249                 return -ENOMEM;
1250
1251         gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
1252         return 0;
1253 }
1254
1255 static int __init gic_acpi_collect_gicr_base(void)
1256 {
1257         acpi_tbl_entry_handler redist_parser;
1258         enum acpi_madt_type type;
1259
1260         if (acpi_data.single_redist) {
1261                 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
1262                 redist_parser = gic_acpi_parse_madt_gicc;
1263         } else {
1264                 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
1265                 redist_parser = gic_acpi_parse_madt_redist;
1266         }
1267
1268         /* Collect redistributor base addresses in GICR entries */
1269         if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
1270                 return 0;
1271
1272         pr_info("No valid GICR entries exist\n");
1273         return -ENODEV;
1274 }
1275
1276 static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
1277                                   const unsigned long end)
1278 {
1279         /* Subtable presence means that redist exists, that's it */
1280         return 0;
1281 }
1282
1283 static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
1284                                       const unsigned long end)
1285 {
1286         struct acpi_madt_generic_interrupt *gicc =
1287                                 (struct acpi_madt_generic_interrupt *)header;
1288
1289         /*
1290          * If the GICC is enabled and has a valid GICR base address, then
1291          * the GICR base is provided via the GICC subtable.
1292          */
1293         if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
1294                 return 0;
1295
1296         return -ENODEV;
1297 }
1298
1299 static int __init gic_acpi_count_gicr_regions(void)
1300 {
1301         int count;
1302
1303         /*
1304          * Count how many redistributor regions we have. Mixing the two ways
1305          * of describing redistributors is not allowed: the GICR and GICC
1306          * subtables are mutually exclusive.
1307          */
1308         count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
1309                                       gic_acpi_match_gicr, 0);
1310         if (count > 0) {
1311                 acpi_data.single_redist = false;
1312                 return count;
1313         }
1314
1315         count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
1316                                       gic_acpi_match_gicc, 0);
1317         if (count > 0)
1318                 acpi_data.single_redist = true;
1319
1320         return count;
1321 }
1322
1323 static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
1324                                            struct acpi_probe_entry *ape)
1325 {
1326         struct acpi_madt_generic_distributor *dist;
1327         int count;
1328
1329         dist = (struct acpi_madt_generic_distributor *)header;
1330         if (dist->version != ape->driver_data)
1331                 return false;
1332
1333         /* We need to do that exercise anyway; the sooner the better */
1334         count = gic_acpi_count_gicr_regions();
1335         if (count <= 0)
1336                 return false;
1337
1338         acpi_data.nr_redist_regions = count;
1339         return true;
1340 }
1341
1342 static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
1343                                                 const unsigned long end)
1344 {
1345         struct acpi_madt_generic_interrupt *gicc =
1346                 (struct acpi_madt_generic_interrupt *)header;
1347         int maint_irq_mode;
1348         static int first_madt = true;
1349
1350         /* Skip unusable CPUs */
1351         if (!(gicc->flags & ACPI_MADT_ENABLED))
1352                 return 0;
1353
1354         maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
1355                 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
1356
1357         if (first_madt) {
1358                 first_madt = false;
1359
1360                 acpi_data.maint_irq = gicc->vgic_interrupt;
1361                 acpi_data.maint_irq_mode = maint_irq_mode;
1362                 acpi_data.vcpu_base = gicc->gicv_base_address;
1363
1364                 return 0;
1365         }
1366
1367         /*
1368          * The maintenance interrupt and GICV should be the same for every CPU
1369          */
1370         if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
1371             (acpi_data.maint_irq_mode != maint_irq_mode) ||
1372             (acpi_data.vcpu_base != gicc->gicv_base_address))
1373                 return -EINVAL;
1374
1375         return 0;
1376 }
1377
1378 static bool __init gic_acpi_collect_virt_info(void)
1379 {
1380         int count;
1381
1382         count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
1383                                       gic_acpi_parse_virt_madt_gicc, 0);
1384
1385         return (count > 0);
1386 }
1387
1388 #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
1389 #define ACPI_GICV2_VCTRL_MEM_SIZE       (SZ_4K)
1390 #define ACPI_GICV2_VCPU_MEM_SIZE        (SZ_8K)
1391
1392 static void __init gic_acpi_setup_kvm_info(void)
1393 {
1394         int irq;
1395
1396         if (!gic_acpi_collect_virt_info()) {
1397                 pr_warn("Unable to get hardware information used for virtualization\n");
1398                 return;
1399         }
1400
1401         gic_v3_kvm_info.type = GIC_V3;
1402
1403         irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
1404                                 acpi_data.maint_irq_mode,
1405                                 ACPI_ACTIVE_HIGH);
1406         if (irq <= 0)
1407                 return;
1408
1409         gic_v3_kvm_info.maint_irq = irq;
1410
1411         if (acpi_data.vcpu_base) {
1412                 struct resource *vcpu = &gic_v3_kvm_info.vcpu;
1413
1414                 vcpu->flags = IORESOURCE_MEM;
1415                 vcpu->start = acpi_data.vcpu_base;
1416                 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
1417         }
1418
1419         gic_set_kvm_info(&gic_v3_kvm_info);
1420 }
1421
1422 static int __init
1423 gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
1424 {
1425         struct acpi_madt_generic_distributor *dist;
1426         struct fwnode_handle *domain_handle;
1427         size_t size;
1428         int i, err;
1429
1430         /* Get distributor base address */
1431         dist = (struct acpi_madt_generic_distributor *)header;
1432         acpi_data.dist_base = ioremap(dist->base_address,
1433                                       ACPI_GICV3_DIST_MEM_SIZE);
1434         if (!acpi_data.dist_base) {
1435                 pr_err("Unable to map GICD registers\n");
1436                 return -ENOMEM;
1437         }
1438
1439         err = gic_validate_dist_version(acpi_data.dist_base);
1440         if (err) {
1441                 pr_err("No distributor detected at @%p, giving up\n",
1442                        acpi_data.dist_base);
1443                 goto out_dist_unmap;
1444         }
1445
1446         size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
1447         acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
1448         if (!acpi_data.redist_regs) {
1449                 err = -ENOMEM;
1450                 goto out_dist_unmap;
1451         }
1452
1453         err = gic_acpi_collect_gicr_base();
1454         if (err)
1455                 goto out_redist_unmap;
1456
1457         domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
1458         if (!domain_handle) {
1459                 err = -ENOMEM;
1460                 goto out_redist_unmap;
1461         }
1462
1463         err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
1464                              acpi_data.nr_redist_regions, 0, domain_handle);
1465         if (err)
1466                 goto out_fwhandle_free;
1467
1468         acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
1469         gic_acpi_setup_kvm_info();
1470
1471         return 0;
1472
1473 out_fwhandle_free:
1474         irq_domain_free_fwnode(domain_handle);
1475 out_redist_unmap:
1476         for (i = 0; i < acpi_data.nr_redist_regions; i++)
1477                 if (acpi_data.redist_regs[i].redist_base)
1478                         iounmap(acpi_data.redist_regs[i].redist_base);
1479         kfree(acpi_data.redist_regs);
1480 out_dist_unmap:
1481         iounmap(acpi_data.dist_base);
1482         return err;
1483 }
1484 IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
1485                      acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
1486                      gic_acpi_init);
1487 IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
1488                      acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
1489                      gic_acpi_init);
1490 IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
1491                      acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
1492                      gic_acpi_init);
1493 #endif