irqchip: mips-gic: Use separate edge/level irq_chips
drivers/irqchip/irq-mips-gic.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/clocksource.h>

#include <asm/io.h>
#include <asm/gic.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>

unsigned int gic_frequency;
unsigned int gic_present;
unsigned long _gic_base;
unsigned int gic_cpu_pin;

struct gic_pcpu_mask {
        DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

struct gic_pending_regs {
        DECLARE_BITMAP(pending, GIC_MAX_INTRS);
};

struct gic_intrmask_regs {
        DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);
};

static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);

#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
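/*
 * Read the free-running 64-bit GIC counter. The high word is re-read
 * until it is stable so a carry between the two 32-bit accesses cannot
 * produce a torn value.
 */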
cycle_t gic_read_count(void)
{
        unsigned int hi, hi2, lo;

        do {
                GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
                GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
                GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
        } while (hi2 != hi);

        return (((cycle_t) hi) << 32) + lo;
}

void gic_write_compare(cycle_t cnt)
{
        GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
                                (int)(cnt >> 32));
        GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
                                (int)(cnt & 0xffffffff));
}

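/*
 * Program the compare register of another VPE by pointing the local
 * "other" address register at it first. Interrupts are kept disabled so
 * the OTHER_ADDR indirection cannot be disturbed part way through.
 */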
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
        unsigned long flags;

        local_irq_save(flags);

        GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
        GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
                                (int)(cnt >> 32));
        GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
                                (int)(cnt & 0xffffffff));

        local_irq_restore(flags);
}

cycle_t gic_read_compare(void)
{
        unsigned int hi, lo;

        GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
        GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);

        return (((cycle_t) hi) << 32) + lo;
}
#endif

unsigned int gic_get_timer_pending(void)
{
        unsigned int vpe_pending;

        GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
        GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending);
        return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
}

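/* Tell the GIC whether an EIC vector should use the shadow register set. */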
void gic_bind_eic_interrupt(int irq, int set)
{
        /* Convert irq vector # to hw int # */
        irq -= GIC_PIN_TO_VEC_OFFSET;

        /* Set irq to use shadow set */
        GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set);
}

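/* Writing WEDGE with bit 31 set asserts the given edge-triggered interrupt. */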
void gic_send_ipi(unsigned int intr)
{
        GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
}

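/*
 * Route the local timer and performance counter interrupts of every VPE
 * to the chosen CPU pin, and in EIC mode install the GIC dispatch handler
 * on the corresponding vectors.
 */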
static void __init vpe_local_setup(unsigned int numvpes)
{
        unsigned long timer_intr = GIC_INT_TMR;
        unsigned long perf_intr = GIC_INT_PERFCTR;
        unsigned int vpe_ctl;
        int i;

        if (cpu_has_veic) {
                /*
                 * GIC timer interrupt -> CPU HW Int X (vector X+2) ->
                 * map to pin X+2-1 (since GIC adds 1)
                 */
                timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
                /*
                 * GIC perfcnt interrupt -> CPU HW Int X (vector X+2) ->
                 * map to pin X+2-1 (since GIC adds 1)
                 */
                perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
        }

        /*
         * Setup the default performance counter and timer interrupts
         * for all VPEs
         */
        for (i = 0; i < numvpes; i++) {
                GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

                /* Are interrupts locally routable? */
                GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
                if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
                        GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
                                 GIC_MAP_TO_PIN_MSK | timer_intr);
                if (cpu_has_veic) {
                        set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET,
                                       __gic_irq_dispatch);
                }

                if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
                        GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
                                 GIC_MAP_TO_PIN_MSK | perf_intr);
                if (cpu_has_veic) {
                        set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET,
                                       __gic_irq_dispatch);
                }
        }
}

unsigned int gic_compare_int(void)
{
        unsigned int pending;

        GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
        if (pending & GIC_VPE_PEND_CMP_MSK)
                return 1;
        else
                return 0;
}

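/*
 * Compute which shared interrupts need service on this CPU: the pending
 * bitmap is ANDed with the mask register contents and with the bitmap of
 * interrupts routed to this CPU, then with the caller-supplied mask.
 */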
void gic_get_int_mask(unsigned long *dst, const unsigned long *src)
{
        unsigned int i;
        unsigned long *pending, *intrmask, *pcpu_mask;
        unsigned long *pending_abs, *intrmask_abs;

        /* Get per-cpu bitmaps */
        pending = pending_regs[smp_processor_id()].pending;
        intrmask = intrmask_regs[smp_processor_id()].intrmask;
        pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

        pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
                                                         GIC_SH_PEND_31_0_OFS);
        intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
                                                          GIC_SH_MASK_31_0_OFS);

        for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
                GICREAD(*pending_abs, pending[i]);
                GICREAD(*intrmask_abs, intrmask[i]);
                pending_abs++;
                intrmask_abs++;
        }

        bitmap_and(pending, pending, intrmask, gic_shared_intrs);
        bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
        bitmap_and(dst, src, pending, gic_shared_intrs);
}

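/*
 * Return the lowest-numbered shared interrupt pending for this CPU, or
 * gic_shared_intrs if none is pending.
 */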
unsigned int gic_get_int(void)
{
        DECLARE_BITMAP(interrupts, GIC_MAX_INTRS);

        bitmap_fill(interrupts, gic_shared_intrs);
        gic_get_int_mask(interrupts, interrupts);

        return find_first_bit(interrupts, gic_shared_intrs);
}

static void gic_mask_irq(struct irq_data *d)
{
        GIC_CLR_INTR_MASK(d->hwirq);
}

static void gic_unmask_irq(struct irq_data *d)
{
        GIC_SET_INTR_MASK(d->hwirq);
}

static void gic_ack_irq(struct irq_data *d)
{
        unsigned int irq = d->hwirq;

        GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
}

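/*
 * Program the polarity, trigger and dual-edge registers for the requested
 * sense, then switch the descriptor to the matching irq_chip and flow
 * handler: handle_edge_irq for edge senses, handle_level_irq otherwise.
 */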
static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = d->hwirq;
        unsigned long flags;
        bool is_edge;

        spin_lock_irqsave(&gic_lock, flags);
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_FALLING:
                GIC_SET_POLARITY(irq, GIC_POL_NEG);
                GIC_SET_TRIGGER(irq, GIC_TRIG_EDGE);
                GIC_SET_DUAL(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_EDGE_RISING:
                GIC_SET_POLARITY(irq, GIC_POL_POS);
                GIC_SET_TRIGGER(irq, GIC_TRIG_EDGE);
                GIC_SET_DUAL(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_EDGE_BOTH:
                /* polarity is irrelevant in this case */
                GIC_SET_TRIGGER(irq, GIC_TRIG_EDGE);
                GIC_SET_DUAL(irq, GIC_TRIG_DUAL_ENABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                GIC_SET_POLARITY(irq, GIC_POL_NEG);
                GIC_SET_TRIGGER(irq, GIC_TRIG_LEVEL);
                GIC_SET_DUAL(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = false;
                break;
        case IRQ_TYPE_LEVEL_HIGH:
        default:
                GIC_SET_POLARITY(irq, GIC_POL_POS);
                GIC_SET_TRIGGER(irq, GIC_TRIG_LEVEL);
                GIC_SET_DUAL(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = false;
                break;
        }

        if (is_edge) {
                __irq_set_chip_handler_name_locked(d->irq,
                                                   &gic_edge_irq_controller,
                                                   handle_edge_irq, NULL);
        } else {
                __irq_set_chip_handler_name_locked(d->irq,
                                                   &gic_level_irq_controller,
                                                   handle_level_irq, NULL);
        }
        spin_unlock_irqrestore(&gic_lock, flags);

        return 0;
}

#ifdef CONFIG_SMP
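/*
 * Route a shared interrupt to the first online CPU in the requested mask
 * and update the per-CPU masks used by gic_get_int_mask() accordingly.
 */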
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
                            bool force)
{
        unsigned int irq = d->hwirq;
        cpumask_t       tmp = CPU_MASK_NONE;
        unsigned long   flags;
        int             i;

        cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpus_empty(tmp))
                return -EINVAL;

        /* Assumption: cpumask refers to a single CPU */
        spin_lock_irqsave(&gic_lock, flags);

        /* Re-route this IRQ */
        GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));

        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(irq, pcpu_masks[i].pcpu_mask);
        set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

        cpumask_copy(d->affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);

        return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

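/*
 * Level and edge interrupts share the same mask/unmask and affinity
 * callbacks; only the edge chip provides irq_ack, which clears the WEDGE
 * latch before the handler runs.
 */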
static struct irq_chip gic_level_irq_controller = {
        .name                   =       "MIPS GIC",
        .irq_mask               =       gic_mask_irq,
        .irq_unmask             =       gic_unmask_irq,
        .irq_set_type           =       gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       =       gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
        .name                   =       "MIPS GIC",
        .irq_ack                =       gic_ack_irq,
        .irq_mask               =       gic_mask_irq,
        .irq_unmask             =       gic_unmask_irq,
        .irq_set_type           =       gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       =       gic_set_affinity,
#endif
};

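/* Handle every pending shared interrupt routed to this CPU. */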
static void __gic_irq_dispatch(void)
{
        unsigned int intr, virq;

        while ((intr = gic_get_int()) != gic_shared_intrs) {
                virq = irq_linear_revmap(gic_irq_domain, intr);
                do_IRQ(virq);
        }
}

static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
        __gic_irq_dispatch();
}

#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
        return gic_resched_int_base + cpu;
}

unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
        return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
        .handler        = ipi_resched_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI resched"
};

static struct irqaction irq_call = {
        .handler        = ipi_call_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI call"
};

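/*
 * Map one IPI interrupt, route it to a single CPU, mark it rising-edge
 * triggered and install the per-CPU flow handler and action for it.
 */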
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
                                    struct irqaction *action)
{
        int virq = irq_create_mapping(gic_irq_domain, intr);
        int i;

        GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[cpu].pcpu_mask);

        irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

        irq_set_handler(virq, handle_percpu_irq);
        setup_irq(virq, action);
}

static __init void gic_ipi_init(void)
{
        int i;

        /* Use the last 2 * nr_cpu_ids shared interrupts as IPIs */
        gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
        gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

        for (i = 0; i < nr_cpu_ids; i++) {
                gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
                gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
        }
}
#else
static inline void gic_ipi_init(void)
{
}
#endif

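/*
 * Set every shared interrupt to active-high level triggered and masked,
 * then set up the per-VPE local interrupt routing.
 */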
static void __init gic_basic_init(int numvpes)
{
        unsigned int i;

        board_bind_eic_interrupt = &gic_bind_eic_interrupt;

        /* Setup defaults */
        for (i = 0; i < gic_shared_intrs; i++) {
                GIC_SET_POLARITY(i, GIC_POL_POS);
                GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
                GIC_CLR_INTR_MASK(i);
        }

        vpe_local_setup(numvpes);
}

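/*
 * Newly mapped interrupts default to the level chip and handle_level_irq;
 * gic_set_type() switches them to the edge chip when an edge sense is
 * requested. Each interrupt is initially routed to the chosen CPU pin on
 * VPE 0.
 */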
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
                              irq_hw_number_t hw)
{
        unsigned long flags;

        irq_set_chip_and_handler(virq, &gic_level_irq_controller,
                                 handle_level_irq);

        spin_lock_irqsave(&gic_lock, flags);
        GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(hw)),
                 GIC_MAP_TO_PIN_MSK | gic_cpu_pin);
        /* Map to VPE 0 by default */
        GIC_SH_MAP_TO_VPE_SMASK(hw, 0);
        set_bit(hw, pcpu_masks[0].pcpu_mask);
        spin_unlock_irqrestore(&gic_lock, flags);

        return 0;
}

static struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .xlate = irq_domain_xlate_twocell,
};

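/*
 * Probe the GIC configuration (number of shared interrupts and VPEs),
 * hook the dispatch routine up to either the EIC vector or the chained
 * CPU interrupt, create the IRQ domain, program the shared-interrupt
 * defaults and set up the IPIs.
 */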
void __init gic_init(unsigned long gic_base_addr,
                     unsigned long gic_addrspace_size, unsigned int cpu_vec,
                     unsigned int irqbase)
{
        unsigned int gicconfig;
        int numvpes;

        _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
                                                    gic_addrspace_size);

        GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
        gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
                   GIC_SH_CONFIG_NUMINTRS_SHF;
        gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

        numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
                  GIC_SH_CONFIG_NUMVPES_SHF;
        numvpes = numvpes + 1;

        if (cpu_has_veic) {
                /* Always use vector 1 in EIC mode */
                gic_cpu_pin = 0;
                set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
                               __gic_irq_dispatch);
        } else {
                gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
                irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
                                        gic_irq_dispatch);
        }

        gic_irq_domain = irq_domain_add_simple(NULL, gic_shared_intrs, irqbase,
                                               &gic_irq_domain_ops, NULL);
        if (!gic_irq_domain)
                panic("Failed to add GIC IRQ domain");

        gic_basic_init(numvpes);

        gic_ipi_init();
}