drivers/irqchip/exynos-combiner.c
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "irqchip.h"

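/*
 * Register layout: each 0x10-byte combiner register block serves four
 * combiner groups of IRQ_IN_COMBINER (8) inputs each.  The enable
 * set/clear and status registers pack those four groups as one byte
 * lane per group, so a single 32-bit access covers groups 4n..4n+3.
 */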
#define COMBINER_ENABLE_SET     0x0
#define COMBINER_ENABLE_CLEAR   0x4
#define COMBINER_INT_STATUS     0xC

#define IRQ_IN_COMBINER         8

static DEFINE_SPINLOCK(irq_controller_lock);

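/*
 * Per-group state: @hwirq_offset is the hwirq corresponding to bit 0 of
 * the shared status register (i.e. the first input of the four-group
 * block), @irq_mask selects this group's byte lane within that register,
 * @base points at the shared 0x10-byte register block, and @parent_irq
 * is the upstream interrupt this group is chained onto.
 */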
struct combiner_chip_data {
        unsigned int hwirq_offset;
        unsigned int irq_mask;
        void __iomem *base;
        unsigned int parent_irq;
};

static struct irq_domain *combiner_irq_domain;

static inline void __iomem *combiner_base(struct irq_data *data)
{
        struct combiner_chip_data *combiner_data =
                irq_data_get_irq_chip_data(data);

        return combiner_data->base;
}

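/*
 * The enable registers have separate set and clear addresses, so masking
 * and unmasking are single writes of the per-input bit with no
 * read-modify-write cycle required.
 */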
static void combiner_mask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->hwirq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
        u32 mask = 1 << (data->hwirq % 32);

        __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

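/*
 * Chained handler for the parent interrupt: read the shared status
 * register, keep only this group's byte lane, translate the lowest
 * pending bit into a combiner hwirq and dispatch the Linux interrupt
 * mapped to it.
 */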
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned int cascade_irq, combiner_irq;
        unsigned long status;

        chained_irq_enter(chip, desc);

        spin_lock(&irq_controller_lock);
        status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
        spin_unlock(&irq_controller_lock);
        status &= chip_data->irq_mask;

        if (status == 0)
                goto out;

        combiner_irq = chip_data->hwirq_offset + __ffs(status);
        cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

        if (unlikely(!cascade_irq))
                handle_bad_irq(irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        chained_irq_exit(chip, desc);
}

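/*
 * The combiner has no affinity control of its own; affinity requests are
 * forwarded to the parent interrupt, so all eight inputs of a group
 * always move between CPUs together.
 */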
#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
                                 const struct cpumask *mask_val, bool force)
{
        struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
        struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
        struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

        if (chip && chip->irq_set_affinity)
                return chip->irq_set_affinity(data, mask_val, force);
        else
                return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
        .name                   = "COMBINER",
        .irq_mask               = combiner_mask_irq,
        .irq_unmask             = combiner_unmask_irq,
#ifdef CONFIG_SMP
        .irq_set_affinity       = combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
                                        unsigned int irq)
{
        if (irq_set_handler_data(irq, combiner_data) != 0)
                BUG();
        irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

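/*
 * Set up one combiner group: record its byte lane and register block and
 * start with every input masked.
 */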
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
                                     unsigned int combiner_nr,
                                     void __iomem *base, unsigned int irq)
{
        combiner_data->base = base;
        combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
        combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
        combiner_data->parent_irq = irq;

        /* Disable all interrupts */
        __raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

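/*
 * Device tree interrupt specifiers use two cells: the combiner group
 * number and the input within that group.  The linear hwirq is simply
 * group * IRQ_IN_COMBINER + input, e.g. <2 4> becomes hwirq 20.
 */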
static int combiner_irq_domain_xlate(struct irq_domain *d,
                                     struct device_node *controller,
                                     const u32 *intspec, unsigned int intsize,
                                     unsigned long *out_hwirq,
                                     unsigned int *out_type)
{
        if (d->of_node != controller)
                return -EINVAL;

        if (intsize < 2)
                return -EINVAL;

        *out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
        *out_type = 0;

        return 0;
}

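/*
 * Map a hwirq to a Linux interrupt: install the combiner chip with level
 * handling and point the chip data at the owning group (hwirq / 8).
 */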
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
                                   irq_hw_number_t hw)
{
        struct combiner_chip_data *combiner_data = d->host_data;

        irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
        irq_set_chip_data(irq, &combiner_data[hw >> 3]);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

        return 0;
}

static struct irq_domain_ops combiner_irq_domain_ops = {
        .xlate  = combiner_irq_domain_xlate,
        .map    = combiner_irq_domain_map,
};

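/*
 * Allocate per-group data, register a linear domain covering
 * max_nr * IRQ_IN_COMBINER hwirqs, and chain each group onto the
 * corresponding parent interrupt taken from the device tree.
 */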
static void __init combiner_init(void __iomem *combiner_base,
                                 struct device_node *np,
                                 unsigned int max_nr)
{
        int i, irq;
        unsigned int nr_irq;
        struct combiner_chip_data *combiner_data;

        nr_irq = max_nr * IRQ_IN_COMBINER;

        combiner_data = kcalloc(max_nr, sizeof(*combiner_data), GFP_KERNEL);
        if (!combiner_data) {
                pr_warning("%s: could not allocate combiner data\n", __func__);
                return;
        }

        combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
                                &combiner_irq_domain_ops, combiner_data);
        if (WARN_ON(!combiner_irq_domain)) {
                pr_warning("%s: irq domain init failed\n", __func__);
                return;
        }

        for (i = 0; i < max_nr; i++) {
                irq = irq_of_parse_and_map(np, i);

                combiner_init_one(&combiner_data[i], i,
                                  combiner_base + (i >> 2) * 0x10, irq);
                combiner_cascade_irq(&combiner_data[i], irq);
        }
}

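/*
 * Map the combiner registers from the device tree and read the optional
 * "samsung,combiner-nr" property; without it the driver assumes 20
 * combiner groups.
 */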
static int __init combiner_of_init(struct device_node *np,
                                   struct device_node *parent)
{
        void __iomem *combiner_base;
        unsigned int max_nr = 20;

        combiner_base = of_iomap(np, 0);
        if (!combiner_base) {
                pr_err("%s: failed to map combiner registers\n", __func__);
                return -ENXIO;
        }

        if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
                pr_info("%s: number of combiners not specified, "
                        "setting default as %d.\n",
                        __func__, max_nr);
        }

        combiner_init(combiner_base, np, max_nr);

        return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
                combiner_of_init);