/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/gic.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <asm-generic/bitops/find.h>
24 unsigned int gic_frequency;
25 unsigned int gic_present;
26 unsigned long _gic_base;
27 unsigned int gic_cpu_pin;
29 struct gic_pcpu_mask {
30 DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
33 struct gic_pending_regs {
34 DECLARE_BITMAP(pending, GIC_MAX_INTRS);
37 struct gic_intrmask_regs {
38 DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);
41 static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
42 static struct gic_pending_regs pending_regs[NR_CPUS];
43 static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
44 static DEFINE_SPINLOCK(gic_lock);
45 static struct irq_domain *gic_irq_domain;
46 static int gic_shared_intrs;
47 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
49 static void __gic_irq_dispatch(void);
51 #if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
52 cycle_t gic_read_count(void)
54 unsigned int hi, hi2, lo;
57 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
58 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
59 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
62 return (((cycle_t) hi) << 32) + lo;
65 void gic_write_compare(cycle_t cnt)
67 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
69 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
70 (int)(cnt & 0xffffffff));
73 void gic_write_cpu_compare(cycle_t cnt, int cpu)
77 local_irq_save(flags);
79 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
80 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
82 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
83 (int)(cnt & 0xffffffff));
85 local_irq_restore(flags);
88 cycle_t gic_read_compare(void)
92 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
93 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
95 return (((cycle_t) hi) << 32) + lo;
99 unsigned int gic_get_timer_pending(void)
101 unsigned int vpe_pending;
103 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
104 GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending);
105 return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
108 void gic_bind_eic_interrupt(int irq, int set)
110 /* Convert irq vector # to hw int # */
111 irq -= GIC_PIN_TO_VEC_OFFSET;
113 /* Set irq to use shadow set */
114 GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set);
117 void gic_send_ipi(unsigned int intr)
119 GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
122 static void __init vpe_local_setup(unsigned int numvpes)
124 unsigned long timer_intr = GIC_INT_TMR;
125 unsigned long perf_intr = GIC_INT_PERFCTR;
126 unsigned int vpe_ctl;
131 * GIC timer interrupt -> CPU HW Int X (vector X+2) ->
132 * map to pin X+2-1 (since GIC adds 1)
134 timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
136 * GIC perfcnt interrupt -> CPU HW Int X (vector X+2) ->
137 * map to pin X+2-1 (since GIC adds 1)
139 perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
143 * Setup the default performance counter timer interrupts
146 for (i = 0; i < numvpes; i++) {
147 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
149 /* Are Interrupts locally routable? */
150 GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
151 if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
152 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
153 GIC_MAP_TO_PIN_MSK | timer_intr);
155 set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET,
159 if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
160 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
161 GIC_MAP_TO_PIN_MSK | perf_intr);
163 set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET,
169 unsigned int gic_compare_int(void)
171 unsigned int pending;
173 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
174 if (pending & GIC_VPE_PEND_CMP_MSK)
180 void gic_get_int_mask(unsigned long *dst, const unsigned long *src)
183 unsigned long *pending, *intrmask, *pcpu_mask;
184 unsigned long *pending_abs, *intrmask_abs;
186 /* Get per-cpu bitmaps */
187 pending = pending_regs[smp_processor_id()].pending;
188 intrmask = intrmask_regs[smp_processor_id()].intrmask;
189 pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
191 pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
192 GIC_SH_PEND_31_0_OFS);
193 intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
194 GIC_SH_MASK_31_0_OFS);
196 for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
197 GICREAD(*pending_abs, pending[i]);
198 GICREAD(*intrmask_abs, intrmask[i]);
203 bitmap_and(pending, pending, intrmask, gic_shared_intrs);
204 bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
205 bitmap_and(dst, src, pending, gic_shared_intrs);
208 unsigned int gic_get_int(void)
210 DECLARE_BITMAP(interrupts, GIC_MAX_INTRS);
212 bitmap_fill(interrupts, gic_shared_intrs);
213 gic_get_int_mask(interrupts, interrupts);
215 return find_first_bit(interrupts, gic_shared_intrs);
218 static void gic_mask_irq(struct irq_data *d)
220 GIC_CLR_INTR_MASK(d->hwirq);
223 static void gic_unmask_irq(struct irq_data *d)
225 GIC_SET_INTR_MASK(d->hwirq);
228 static void gic_ack_irq(struct irq_data *d)
230 unsigned int irq = d->hwirq;
232 GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
235 static int gic_set_type(struct irq_data *d, unsigned int type)
237 unsigned int irq = d->hwirq;
241 spin_lock_irqsave(&gic_lock, flags);
242 switch (type & IRQ_TYPE_SENSE_MASK) {
243 case IRQ_TYPE_EDGE_FALLING:
244 GIC_SET_POLARITY(irq, GIC_POL_NEG);
245 GIC_SET_TRIGGER(irq, GIC_TRIG_EDGE);
246 GIC_SET_DUAL(irq, GIC_TRIG_DUAL_DISABLE);
249 case IRQ_TYPE_EDGE_RISING:
250 GIC_SET_POLARITY(irq, GIC_POL_POS);
251 GIC_SET_TRIGGER(irq, GIC_TRIG_EDGE);
252 GIC_SET_DUAL(irq, GIC_TRIG_DUAL_DISABLE);
255 case IRQ_TYPE_EDGE_BOTH:
256 /* polarity is irrelevant in this case */
257 GIC_SET_TRIGGER(irq, GIC_TRIG_EDGE);
258 GIC_SET_DUAL(irq, GIC_TRIG_DUAL_ENABLE);
261 case IRQ_TYPE_LEVEL_LOW:
262 GIC_SET_POLARITY(irq, GIC_POL_NEG);
263 GIC_SET_TRIGGER(irq, GIC_TRIG_LEVEL);
264 GIC_SET_DUAL(irq, GIC_TRIG_DUAL_DISABLE);
267 case IRQ_TYPE_LEVEL_HIGH:
269 GIC_SET_POLARITY(irq, GIC_POL_POS);
270 GIC_SET_TRIGGER(irq, GIC_TRIG_LEVEL);
271 GIC_SET_DUAL(irq, GIC_TRIG_DUAL_DISABLE);
277 __irq_set_chip_handler_name_locked(d->irq,
278 &gic_edge_irq_controller,
279 handle_edge_irq, NULL);
281 __irq_set_chip_handler_name_locked(d->irq,
282 &gic_level_irq_controller,
283 handle_level_irq, NULL);
285 spin_unlock_irqrestore(&gic_lock, flags);
291 static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
294 unsigned int irq = d->hwirq;
295 cpumask_t tmp = CPU_MASK_NONE;
299 cpumask_and(&tmp, cpumask, cpu_online_mask);
303 /* Assumption : cpumask refers to a single CPU */
304 spin_lock_irqsave(&gic_lock, flags);
306 /* Re-route this IRQ */
307 GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
309 /* Update the pcpu_masks */
310 for (i = 0; i < NR_CPUS; i++)
311 clear_bit(irq, pcpu_masks[i].pcpu_mask);
312 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
314 cpumask_copy(d->affinity, cpumask);
315 spin_unlock_irqrestore(&gic_lock, flags);
317 return IRQ_SET_MASK_OK_NOCOPY;
321 static struct irq_chip gic_level_irq_controller = {
323 .irq_mask = gic_mask_irq,
324 .irq_unmask = gic_unmask_irq,
325 .irq_set_type = gic_set_type,
327 .irq_set_affinity = gic_set_affinity,
331 static struct irq_chip gic_edge_irq_controller = {
333 .irq_ack = gic_ack_irq,
334 .irq_mask = gic_mask_irq,
335 .irq_unmask = gic_unmask_irq,
336 .irq_set_type = gic_set_type,
338 .irq_set_affinity = gic_set_affinity,
342 static void __gic_irq_dispatch(void)
344 unsigned int intr, virq;
346 while ((intr = gic_get_int()) != gic_shared_intrs) {
347 virq = irq_linear_revmap(gic_irq_domain, intr);
/* Chained-handler entry point for the CPU IRQ line the GIC is cascaded on. */
static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	__gic_irq_dispatch();
}
357 #ifdef CONFIG_MIPS_GIC_IPI
358 static int gic_resched_int_base;
359 static int gic_call_int_base;
361 unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
363 return gic_resched_int_base + cpu;
366 unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
368 return gic_call_int_base + cpu;
371 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
378 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
380 smp_call_function_interrupt();
385 static struct irqaction irq_resched = {
386 .handler = ipi_resched_interrupt,
387 .flags = IRQF_PERCPU,
388 .name = "IPI resched"
391 static struct irqaction irq_call = {
392 .handler = ipi_call_interrupt,
393 .flags = IRQF_PERCPU,
397 static __init void gic_ipi_init_one(unsigned int intr, int cpu,
398 struct irqaction *action)
400 int virq = irq_create_mapping(gic_irq_domain, intr);
403 GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
404 for (i = 0; i < NR_CPUS; i++)
405 clear_bit(intr, pcpu_masks[i].pcpu_mask);
406 set_bit(intr, pcpu_masks[cpu].pcpu_mask);
408 irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
410 irq_set_handler(virq, handle_percpu_irq);
411 setup_irq(virq, action);
414 static __init void gic_ipi_init(void)
418 /* Use last 2 * NR_CPUS interrupts as IPIs */
419 gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
420 gic_call_int_base = gic_resched_int_base - nr_cpu_ids;
422 for (i = 0; i < nr_cpu_ids; i++) {
423 gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
424 gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
428 static inline void gic_ipi_init(void)
433 static void __init gic_basic_init(int numvpes)
437 board_bind_eic_interrupt = &gic_bind_eic_interrupt;
440 for (i = 0; i < gic_shared_intrs; i++) {
441 GIC_SET_POLARITY(i, GIC_POL_POS);
442 GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
443 GIC_CLR_INTR_MASK(i);
446 vpe_local_setup(numvpes);
449 static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
454 irq_set_chip_and_handler(virq, &gic_level_irq_controller,
457 spin_lock_irqsave(&gic_lock, flags);
458 GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(hw)),
459 GIC_MAP_TO_PIN_MSK | gic_cpu_pin);
460 /* Map to VPE 0 by default */
461 GIC_SH_MAP_TO_VPE_SMASK(hw, 0);
462 set_bit(hw, pcpu_masks[0].pcpu_mask);
463 spin_unlock_irqrestore(&gic_lock, flags);
468 static struct irq_domain_ops gic_irq_domain_ops = {
469 .map = gic_irq_domain_map,
470 .xlate = irq_domain_xlate_twocell,
473 void __init gic_init(unsigned long gic_base_addr,
474 unsigned long gic_addrspace_size, unsigned int cpu_vec,
475 unsigned int irqbase)
477 unsigned int gicconfig;
478 int numvpes, numintrs;
480 _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
483 GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
484 gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
485 GIC_SH_CONFIG_NUMINTRS_SHF;
486 gic_shared_intrs = ((gic_shared_intrs + 1) * 8);
488 numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
489 GIC_SH_CONFIG_NUMVPES_SHF;
490 numvpes = numvpes + 1;
493 /* Always use vector 1 in EIC mode */
495 set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
498 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
499 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
503 gic_irq_domain = irq_domain_add_simple(NULL, gic_shared_intrs, irqbase,
504 &gic_irq_domain_ops, NULL);
506 panic("Failed to add GIC IRQ domain");
508 gic_basic_init(numvpes);