2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 #include <linux/bitmap.h>
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
12 #include <linux/irqchip/mips-gic.h>
13 #include <linux/sched.h>
14 #include <linux/smp.h>
15 #include <linux/irq.h>
16 #include <linux/clocksource.h>
19 #include <asm/setup.h>
20 #include <asm/traps.h>
21 #include <linux/hardirq.h>
22 #include <asm-generic/bitops/find.h>
24 unsigned int gic_frequency;
25 unsigned int gic_present;
27 struct gic_pcpu_mask {
28 DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
31 struct gic_pending_regs {
32 DECLARE_BITMAP(pending, GIC_MAX_INTRS);
35 struct gic_intrmask_regs {
36 DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);
39 static void __iomem *gic_base;
40 static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
41 static struct gic_pending_regs pending_regs[NR_CPUS];
42 static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
43 static DEFINE_SPINLOCK(gic_lock);
44 static struct irq_domain *gic_irq_domain;
45 static int gic_shared_intrs;
47 static unsigned int gic_cpu_pin;
48 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
50 static void __gic_irq_dispatch(void);
52 static inline unsigned int gic_read(unsigned int reg)
54 return __raw_readl(gic_base + reg);
57 static inline void gic_write(unsigned int reg, unsigned int val)
59 __raw_writel(val, gic_base + reg);
62 static inline void gic_update_bits(unsigned int reg, unsigned int mask,
67 regval = gic_read(reg);
70 gic_write(reg, regval);
73 static inline void gic_reset_mask(unsigned int intr)
75 gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
76 1 << GIC_INTR_BIT(intr));
79 static inline void gic_set_mask(unsigned int intr)
81 gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
82 1 << GIC_INTR_BIT(intr));
85 static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
87 gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
88 GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
89 pol << GIC_INTR_BIT(intr));
92 static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
94 gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
95 GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
96 trig << GIC_INTR_BIT(intr));
99 static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
101 gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
102 1 << GIC_INTR_BIT(intr),
103 dual << GIC_INTR_BIT(intr));
106 static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
108 gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
109 GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
112 static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
114 gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
115 GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
116 GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
119 #if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
120 cycle_t gic_read_count(void)
122 unsigned int hi, hi2, lo;
125 hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
126 lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
127 hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
130 return (((cycle_t) hi) << 32) + lo;
133 unsigned int gic_get_count_width(void)
135 unsigned int bits, config;
137 config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
138 bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
139 GIC_SH_CONFIG_COUNTBITS_SHF);
144 void gic_write_compare(cycle_t cnt)
146 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
148 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
149 (int)(cnt & 0xffffffff));
152 void gic_write_cpu_compare(cycle_t cnt, int cpu)
156 local_irq_save(flags);
158 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
159 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
161 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
162 (int)(cnt & 0xffffffff));
164 local_irq_restore(flags);
167 cycle_t gic_read_compare(void)
171 hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
172 lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
174 return (((cycle_t) hi) << 32) + lo;
178 static bool gic_local_irq_is_routable(int intr)
182 /* All local interrupts are routable in EIC mode. */
186 vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
188 case GIC_LOCAL_INT_TIMER:
189 return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
190 case GIC_LOCAL_INT_PERFCTR:
191 return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
192 case GIC_LOCAL_INT_FDC:
193 return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
194 case GIC_LOCAL_INT_SWINT0:
195 case GIC_LOCAL_INT_SWINT1:
196 return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
202 unsigned int gic_get_timer_pending(void)
204 unsigned int vpe_pending;
206 vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
207 return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
210 static void gic_bind_eic_interrupt(int irq, int set)
212 /* Convert irq vector # to hw int # */
213 irq -= GIC_PIN_TO_VEC_OFFSET;
215 /* Set irq to use shadow set */
216 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
217 GIC_VPE_EIC_SS(irq), set);
220 void gic_send_ipi(unsigned int intr)
222 gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
225 int gic_get_c0_compare_int(void)
227 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
228 return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
229 return irq_create_mapping(gic_irq_domain,
230 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
233 int gic_get_c0_perfcount_int(void)
235 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
236 /* Is the erformance counter shared with the timer? */
237 if (cp0_perfcount_irq < 0)
239 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
241 return irq_create_mapping(gic_irq_domain,
242 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
245 static unsigned int gic_get_int(void)
248 unsigned long *pending, *intrmask, *pcpu_mask;
249 unsigned long pending_reg, intrmask_reg;
251 /* Get per-cpu bitmaps */
252 pending = pending_regs[smp_processor_id()].pending;
253 intrmask = intrmask_regs[smp_processor_id()].intrmask;
254 pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
256 pending_reg = GIC_REG(SHARED, GIC_SH_PEND_31_0);
257 intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK_31_0);
259 for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
260 pending[i] = gic_read(pending_reg);
261 intrmask[i] = gic_read(intrmask_reg);
266 bitmap_and(pending, pending, intrmask, gic_shared_intrs);
267 bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
269 return find_first_bit(pending, gic_shared_intrs);
272 static void gic_mask_irq(struct irq_data *d)
274 gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
277 static void gic_unmask_irq(struct irq_data *d)
279 gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
282 static void gic_ack_irq(struct irq_data *d)
284 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
286 gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
289 static int gic_set_type(struct irq_data *d, unsigned int type)
291 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
295 spin_lock_irqsave(&gic_lock, flags);
296 switch (type & IRQ_TYPE_SENSE_MASK) {
297 case IRQ_TYPE_EDGE_FALLING:
298 gic_set_polarity(irq, GIC_POL_NEG);
299 gic_set_trigger(irq, GIC_TRIG_EDGE);
300 gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
303 case IRQ_TYPE_EDGE_RISING:
304 gic_set_polarity(irq, GIC_POL_POS);
305 gic_set_trigger(irq, GIC_TRIG_EDGE);
306 gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
309 case IRQ_TYPE_EDGE_BOTH:
310 /* polarity is irrelevant in this case */
311 gic_set_trigger(irq, GIC_TRIG_EDGE);
312 gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
315 case IRQ_TYPE_LEVEL_LOW:
316 gic_set_polarity(irq, GIC_POL_NEG);
317 gic_set_trigger(irq, GIC_TRIG_LEVEL);
318 gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
321 case IRQ_TYPE_LEVEL_HIGH:
323 gic_set_polarity(irq, GIC_POL_POS);
324 gic_set_trigger(irq, GIC_TRIG_LEVEL);
325 gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
331 __irq_set_chip_handler_name_locked(d->irq,
332 &gic_edge_irq_controller,
333 handle_edge_irq, NULL);
335 __irq_set_chip_handler_name_locked(d->irq,
336 &gic_level_irq_controller,
337 handle_level_irq, NULL);
339 spin_unlock_irqrestore(&gic_lock, flags);
345 static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
348 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
349 cpumask_t tmp = CPU_MASK_NONE;
353 cpumask_and(&tmp, cpumask, cpu_online_mask);
357 /* Assumption : cpumask refers to a single CPU */
358 spin_lock_irqsave(&gic_lock, flags);
360 /* Re-route this IRQ */
361 gic_map_to_vpe(irq, first_cpu(tmp));
363 /* Update the pcpu_masks */
364 for (i = 0; i < NR_CPUS; i++)
365 clear_bit(irq, pcpu_masks[i].pcpu_mask);
366 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
368 cpumask_copy(d->affinity, cpumask);
369 spin_unlock_irqrestore(&gic_lock, flags);
371 return IRQ_SET_MASK_OK_NOCOPY;
375 static struct irq_chip gic_level_irq_controller = {
377 .irq_mask = gic_mask_irq,
378 .irq_unmask = gic_unmask_irq,
379 .irq_set_type = gic_set_type,
381 .irq_set_affinity = gic_set_affinity,
385 static struct irq_chip gic_edge_irq_controller = {
387 .irq_ack = gic_ack_irq,
388 .irq_mask = gic_mask_irq,
389 .irq_unmask = gic_unmask_irq,
390 .irq_set_type = gic_set_type,
392 .irq_set_affinity = gic_set_affinity,
396 static unsigned int gic_get_local_int(void)
398 unsigned long pending, masked;
400 pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
401 masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
403 bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
405 return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
408 static void gic_mask_local_irq(struct irq_data *d)
410 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
412 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
415 static void gic_unmask_local_irq(struct irq_data *d)
417 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
419 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
422 static struct irq_chip gic_local_irq_controller = {
423 .name = "MIPS GIC Local",
424 .irq_mask = gic_mask_local_irq,
425 .irq_unmask = gic_unmask_local_irq,
428 static void gic_mask_local_irq_all_vpes(struct irq_data *d)
430 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
434 spin_lock_irqsave(&gic_lock, flags);
435 for (i = 0; i < gic_vpes; i++) {
436 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
437 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
439 spin_unlock_irqrestore(&gic_lock, flags);
442 static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
444 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
448 spin_lock_irqsave(&gic_lock, flags);
449 for (i = 0; i < gic_vpes; i++) {
450 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
451 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
453 spin_unlock_irqrestore(&gic_lock, flags);
456 static struct irq_chip gic_all_vpes_local_irq_controller = {
457 .name = "MIPS GIC Local",
458 .irq_mask = gic_mask_local_irq_all_vpes,
459 .irq_unmask = gic_unmask_local_irq_all_vpes,
462 static void __gic_irq_dispatch(void)
464 unsigned int intr, virq;
466 while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) {
467 virq = irq_linear_revmap(gic_irq_domain,
468 GIC_LOCAL_TO_HWIRQ(intr));
472 while ((intr = gic_get_int()) != gic_shared_intrs) {
473 virq = irq_linear_revmap(gic_irq_domain,
474 GIC_SHARED_TO_HWIRQ(intr));
479 static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
481 __gic_irq_dispatch();
#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

/* Translate a CPU number to its reschedule-IPI shared interrupt. */
unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

/* Translate a CPU number to its function-call-IPI shared interrupt. */
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

/* Map shared interrupt @intr as an IPI bound to @cpu and install @action. */
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, cpu);
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}

static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * NR_CPUS interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
static inline void gic_ipi_init(void)
{
}
#endif
561 static void __init gic_basic_init(void)
565 board_bind_eic_interrupt = &gic_bind_eic_interrupt;
568 for (i = 0; i < gic_shared_intrs; i++) {
569 gic_set_polarity(i, GIC_POL_POS);
570 gic_set_trigger(i, GIC_TRIG_LEVEL);
574 for (i = 0; i < gic_vpes; i++) {
577 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
578 for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
579 if (!gic_local_irq_is_routable(j))
581 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
586 static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
589 int intr = GIC_HWIRQ_TO_LOCAL(hw);
594 if (!gic_local_irq_is_routable(intr))
598 * HACK: These are all really percpu interrupts, but the rest
599 * of the MIPS kernel code does not use the percpu IRQ API for
600 * the CP0 timer and performance counter interrupts.
602 if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
603 irq_set_chip_and_handler(virq,
604 &gic_local_irq_controller,
605 handle_percpu_devid_irq);
606 irq_set_percpu_devid(virq);
608 irq_set_chip_and_handler(virq,
609 &gic_all_vpes_local_irq_controller,
613 spin_lock_irqsave(&gic_lock, flags);
614 for (i = 0; i < gic_vpes; i++) {
615 u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
617 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
620 case GIC_LOCAL_INT_WD:
621 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
623 case GIC_LOCAL_INT_COMPARE:
624 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
626 case GIC_LOCAL_INT_TIMER:
627 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
629 case GIC_LOCAL_INT_PERFCTR:
630 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
632 case GIC_LOCAL_INT_SWINT0:
633 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
635 case GIC_LOCAL_INT_SWINT1:
636 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
638 case GIC_LOCAL_INT_FDC:
639 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
642 pr_err("Invalid local IRQ %d\n", intr);
647 spin_unlock_irqrestore(&gic_lock, flags);
652 static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
655 int intr = GIC_HWIRQ_TO_SHARED(hw);
658 irq_set_chip_and_handler(virq, &gic_level_irq_controller,
661 spin_lock_irqsave(&gic_lock, flags);
662 gic_map_to_pin(intr, gic_cpu_pin);
663 /* Map to VPE 0 by default */
664 gic_map_to_vpe(intr, 0);
665 set_bit(intr, pcpu_masks[0].pcpu_mask);
666 spin_unlock_irqrestore(&gic_lock, flags);
671 static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
674 if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
675 return gic_local_irq_domain_map(d, virq, hw);
676 return gic_shared_irq_domain_map(d, virq, hw);
679 static struct irq_domain_ops gic_irq_domain_ops = {
680 .map = gic_irq_domain_map,
681 .xlate = irq_domain_xlate_twocell,
684 void __init gic_init(unsigned long gic_base_addr,
685 unsigned long gic_addrspace_size, unsigned int cpu_vec,
686 unsigned int irqbase)
688 unsigned int gicconfig;
690 gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);
692 gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
693 gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
694 GIC_SH_CONFIG_NUMINTRS_SHF;
695 gic_shared_intrs = ((gic_shared_intrs + 1) * 8);
697 gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
698 GIC_SH_CONFIG_NUMVPES_SHF;
699 gic_vpes = gic_vpes + 1;
702 /* Always use vector 1 in EIC mode */
704 set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
707 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
708 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
712 gic_irq_domain = irq_domain_add_simple(NULL, GIC_NUM_LOCAL_INTRS +
713 gic_shared_intrs, irqbase,
714 &gic_irq_domain_ops, NULL);
716 panic("Failed to add GIC IRQ domain");