2 * linux/arch/arm/plat-pxa/gpio.c
4 * Generic PXA GPIO handling
6 * Author: Nicolas Pitre
7 * Created: Jun 15, 2001
8 * Copyright: MontaVista Software Inc.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 #include <linux/gpio.h>
15 #include <linux/init.h>
16 #include <linux/irq.h>
18 #include <linux/syscore_ops.h>
19 #include <linux/slab.h>
21 #include <mach/gpio-pxa.h>
/*
 * Per-bank state for one 32-bit PXA/MMP GPIO bank: the generic
 * gpio_chip callbacks, the bank's MMIO register base, software
 * shadows of the IRQ mask / edge-detect configuration, and register
 * snapshots taken across suspend/resume.
 * NOTE(review): this excerpt omits some struct lines (e.g. the label
 * buffer referenced by pxa_init_gpio_chip) and the closing brace.
 */
25 struct pxa_gpio_chip {
26 struct gpio_chip chip;
27 void __iomem *regbase;
/* Software copies of the per-bank IRQ configuration, kept in sync by
 * the irq_chip callbacks below and applied by update_edge_detect(). */
30 unsigned long irq_mask;
31 unsigned long irq_edge_rise;
32 unsigned long irq_edge_fall;
/* Register shadows captured in pxa_gpio_suspend() and written back in
 * pxa_gpio_resume(). */
35 unsigned long saved_gplr;
36 unsigned long saved_gpdr;
37 unsigned long saved_grer;
38 unsigned long saved_gfer;
/* Serializes read-modify-write of the shared direction registers
 * (see pxa_gpio_direction_input/output below). */
52 static DEFINE_SPINLOCK(gpio_lock);
/* Array of per-bank state, one entry per 32-GPIO bank; allocated by
 * pxa_init_gpio_chip(). */
53 static struct pxa_gpio_chip *pxa_gpio_chips;
/*
 * Walk every GPIO bank: i advances 32 GPIOs per bank, c points at the
 * bank's pxa_gpio_chip.  pxa_last_gpio is declared outside this excerpt.
 * NOTE(review): macro arguments are expanded unparenthesized — callers
 * must pass simple lvalues (all visible callers do).
 */
56 #define for_each_gpio_chip(i, c) \
57 for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
/* Map a generic gpio_chip back to its bank's MMIO register base via
 * the embedded-struct container_of idiom. */
59 static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
61 return container_of(c, struct pxa_gpio_chip, chip)->regbase;
/* Look up the per-bank state for a global GPIO number; gpio_to_bank()
 * is provided by <mach/gpio-pxa.h>. */
64 static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
66 return &pxa_gpio_chips[gpio_to_bank(gpio)];
/* Predicates on the controller variant: the MMP_GPIO flag bit in a
 * gpio_type value distinguishes MMP from PXA silicon. */
69 static inline int gpio_is_pxa_type(int type)
71 return (type & MMP_GPIO) == 0;
74 static inline int gpio_is_mmp_type(int type)
76 return (type & MMP_GPIO) != 0;
/*
 * GPIO <-> IRQ number translation for PXA-type controllers.  The
 * runtime variable gpio_type (declared outside this excerpt) selects
 * the active variant; on a non-PXA type these helpers fall through to
 * a -1 return in lines omitted from this excerpt.  The two one-line
 * stubs below belong to the (invisible) #else branch used when
 * CONFIG_ARCH_PXA is not set.
 */
79 #ifdef CONFIG_ARCH_PXA
80 static inline int __pxa_gpio_to_irq(int gpio)
82 if (gpio_is_pxa_type(gpio_type))
83 return PXA_GPIO_TO_IRQ(gpio);
87 static inline int __pxa_irq_to_gpio(int irq)
89 if (gpio_is_pxa_type(gpio_type))
90 return irq - PXA_GPIO_TO_IRQ(0);
94 static inline int __pxa_gpio_to_irq(int gpio) { return -1; }
95 static inline int __pxa_irq_to_gpio(int irq) { return -1; }
/*
 * GPIO <-> IRQ number translation for MMP-type controllers; mirrors
 * the PXA helpers above.  As above, the -1 fallthrough returns and the
 * #else for the stub versions are in lines omitted from this excerpt.
 */
98 #ifdef CONFIG_ARCH_MMP
99 static inline int __mmp_gpio_to_irq(int gpio)
101 if (gpio_is_mmp_type(gpio_type))
102 return MMP_GPIO_TO_IRQ(gpio);
106 static inline int __mmp_irq_to_gpio(int irq)
108 if (gpio_is_mmp_type(gpio_type))
109 return irq - MMP_GPIO_TO_IRQ(0);
113 static inline int __mmp_gpio_to_irq(int gpio) { return -1; }
114 static inline int __mmp_irq_to_gpio(int irq) { return -1; }
117 static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
121 gpio = chip->base + offset;
122 ret = __pxa_gpio_to_irq(gpio);
125 return __mmp_gpio_to_irq(gpio);
/*
 * Inverse of pxa_gpio_to_irq(): map a Linux IRQ number back to a
 * global GPIO number, PXA mapping first, then MMP (the check of the
 * PXA result is in an omitted line).  Non-static: used by the irq_chip
 * callbacks below and presumably by platform code elsewhere.
 */
128 int pxa_irq_to_gpio(int irq)
132 ret = __pxa_irq_to_gpio(irq);
135 return __mmp_irq_to_gpio(irq);
138 static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
140 void __iomem *base = gpio_chip_base(chip);
141 uint32_t value, mask = 1 << offset;
144 spin_lock_irqsave(&gpio_lock, flags);
146 value = __raw_readl(base + GPDR_OFFSET);
147 if (__gpio_is_inverted(chip->base + offset))
151 __raw_writel(value, base + GPDR_OFFSET);
153 spin_unlock_irqrestore(&gpio_lock, flags);
157 static int pxa_gpio_direction_output(struct gpio_chip *chip,
158 unsigned offset, int value)
160 void __iomem *base = gpio_chip_base(chip);
161 uint32_t tmp, mask = 1 << offset;
164 __raw_writel(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
166 spin_lock_irqsave(&gpio_lock, flags);
168 tmp = __raw_readl(base + GPDR_OFFSET);
169 if (__gpio_is_inverted(chip->base + offset))
173 __raw_writel(tmp, base + GPDR_OFFSET);
175 spin_unlock_irqrestore(&gpio_lock, flags);
/*
 * gpio_chip .get callback: sample the pin level from GPLR.  Returns
 * the raw masked bit (non-zero for high), not a normalized 0/1 —
 * acceptable to gpiolib, which treats any non-zero as high.
 */
179 static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
181 return __raw_readl(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
/*
 * gpio_chip .set callback: drive the output level via the write-only
 * set (GPSR) / clear (GPCR) registers — atomic per-bit, so no locking
 * is required.
 */
184 static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
186 __raw_writel(1 << offset, gpio_chip_base(chip) +
187 (value ? GPSR_OFFSET : GPCR_OFFSET));
/*
 * Allocate and register one gpio_chip per 32-GPIO bank up to
 * gpio_end.  Fills in the per-bank label, register base and gpiolib
 * callbacks, then publishes the array through pxa_gpio_chips.
 * NOTE(review): omitted lines include the NULL check that precedes the
 * pr_err(), the c->base assignment, the gpiochip_add() call and the
 * return statements — visible code shows only the happy-path setup.
 */
190 static int __init pxa_init_gpio_chip(int gpio_end)
192 int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
193 struct pxa_gpio_chip *chips;
195 chips = kzalloc(nbanks * sizeof(struct pxa_gpio_chip), GFP_KERNEL);
197 pr_err("%s: failed to allocate GPIO chips\n", __func__);
201 for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
202 struct gpio_chip *c = &chips[i].chip;
/* chips[i].label is a field not visible in this excerpt of the struct */
204 sprintf(chips[i].label, "gpio-%d", i);
205 chips[i].regbase = GPIO_BANK(i);
208 c->label = chips[i].label;
210 c->direction_input = pxa_gpio_direction_input;
211 c->direction_output = pxa_gpio_direction_output;
212 c->get = pxa_gpio_get;
213 c->set = pxa_gpio_set;
214 c->to_irq = pxa_gpio_to_irq;
216 /* number of GPIOs on last bank may be less than 32 */
217 c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
220 pxa_gpio_chips = chips;
224 /* Update only those GRERx and GFERx edge detection register bits if those
225 * bits are set in c->irq_mask
/*
 * Apply the software edge configuration to the hardware: for each
 * unmasked line, program the rising (GRER) / falling (GFER) edge
 * enables from the shadows; masked lines keep their current hardware
 * bits cleared from the shadow merge.  Declarations of grer/gfer are
 * in an omitted line.
 */
227 static inline void update_edge_detect(struct pxa_gpio_chip *c)
231 grer = __raw_readl(c->regbase + GRER_OFFSET) & ~c->irq_mask;
232 gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~c->irq_mask;
233 grer |= c->irq_edge_rise & c->irq_mask;
234 gfer |= c->irq_edge_fall & c->irq_mask;
235 __raw_writel(grer, c->regbase + GRER_OFFSET);
236 __raw_writel(gfer, c->regbase + GFER_OFFSET);
/*
 * irq_chip .irq_set_type callback: configure a GPIO IRQ for rising
 * and/or falling edge triggering.
 *
 * For IRQ_TYPE_PROBE the function refuses to touch lines that already
 * have edges configured or are occupied (alternate function/output),
 * otherwise it probes with both edges.  It then forces the pin to
 * input in GPDR (bit sense reversed for inverted GPIOs), records the
 * requested edges in the per-bank shadows, and pushes them to hardware
 * via update_edge_detect().  The early `return 0;` statements inside
 * the probe guards and the final return are in omitted lines.
 */
239 static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
241 struct pxa_gpio_chip *c;
242 int gpio = pxa_irq_to_gpio(d->irq);
243 unsigned long gpdr, mask = GPIO_bit(gpio);
245 c = gpio_to_pxachip(gpio);
247 if (type == IRQ_TYPE_PROBE) {
248 /* Don't mess with enabled GPIOs using preconfigured edges or
249 * GPIOs set to alternate function or to output during probe
251 if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
254 if (__gpio_is_occupied(gpio))
257 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
260 gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
/* inverted GPIOs use a set direction bit to mean "input" */
262 if (__gpio_is_inverted(gpio))
263 __raw_writel(gpdr | mask, c->regbase + GPDR_OFFSET);
265 __raw_writel(gpdr & ~mask, c->regbase + GPDR_OFFSET);
267 if (type & IRQ_TYPE_EDGE_RISING)
268 c->irq_edge_rise |= mask;
270 c->irq_edge_rise &= ~mask;
272 if (type & IRQ_TYPE_EDGE_FALLING)
273 c->irq_edge_fall |= mask;
275 c->irq_edge_fall &= ~mask;
277 update_edge_detect(c);
279 pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
280 ((type & IRQ_TYPE_EDGE_RISING) ? " rising" : ""),
281 ((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
/*
 * Chained handler for the muxed GPIO edge-detect interrupt: for every
 * bank, read the pending-edge status (GEDR), filter it by the software
 * irq_mask, ack by writing the bits back, then dispatch each set bit
 * to its per-GPIO virtual IRQ.
 * NOTE(review): the `loop` variable and `gedr` declaration, and the
 * enclosing do/while that re-scans until no further edges are pending,
 * are in lines omitted from this excerpt.
 */
285 static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
287 struct pxa_gpio_chip *c;
288 int loop, gpio, gpio_base, n;
293 for_each_gpio_chip(gpio, c) {
294 gpio_base = c->chip.base;
296 gedr = __raw_readl(c->regbase + GEDR_OFFSET);
297 gedr = gedr & c->irq_mask;
/* writing a 1 to a GEDR bit clears (acks) that pending edge */
298 __raw_writel(gedr, c->regbase + GEDR_OFFSET);
300 n = find_first_bit(&gedr, BITS_PER_LONG);
301 while (n < BITS_PER_LONG) {
304 generic_handle_irq(gpio_to_irq(gpio_base + n));
305 n = find_next_bit(&gedr, BITS_PER_LONG, n + 1);
/*
 * irq_chip .irq_ack callback: clear this line's pending-edge bit by
 * writing it back to GEDR (write-1-to-clear).
 */
311 static void pxa_ack_muxed_gpio(struct irq_data *d)
313 int gpio = pxa_irq_to_gpio(d->irq);
314 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
316 __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
/*
 * irq_chip .irq_mask callback: drop the line from the software mask
 * and disable both edge-detect enables (GRER/GFER) in hardware; the
 * shadows in irq_edge_rise/fall are kept so irq_unmask can restore the
 * configuration.  Declarations of grer/gfer are in an omitted line.
 */
319 static void pxa_mask_muxed_gpio(struct irq_data *d)
321 int gpio = pxa_irq_to_gpio(d->irq);
322 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
325 c->irq_mask &= ~GPIO_bit(gpio);
327 grer = __raw_readl(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
328 gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
329 __raw_writel(grer, c->regbase + GRER_OFFSET);
330 __raw_writel(gfer, c->regbase + GFER_OFFSET);
/*
 * irq_chip .irq_unmask callback: re-add the line to the software mask
 * and re-program the hardware edge enables from the saved shadows.
 */
333 static void pxa_unmask_muxed_gpio(struct irq_data *d)
335 int gpio = pxa_irq_to_gpio(d->irq);
336 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
338 c->irq_mask |= GPIO_bit(gpio);
339 update_edge_detect(c);
/*
 * irq_chip shared by every muxed GPIO interrupt; .irq_set_wake is
 * filled in at runtime by pxa_init_gpio().  The .name initializer and
 * closing brace are in lines omitted from this excerpt.
 */
342 static struct irq_chip pxa_muxed_gpio_chip = {
344 .irq_ack = pxa_ack_muxed_gpio,
345 .irq_mask = pxa_mask_muxed_gpio,
346 .irq_unmask = pxa_unmask_muxed_gpio,
347 .irq_set_type = pxa_gpio_irq_type,
/*
 * Platform entry point: register all GPIO banks, clear any stale edge
 * state, and wire up the interrupt plumbing.
 *
 * On PXA, GPIO0 and GPIO1 have dedicated parent interrupts
 * (IRQ_GPIO0/IRQ_GPIO1); every GPIO from `start` upward is muxed
 * behind `mux_irq`.  All GPIO IRQs use pxa_muxed_gpio_chip with the
 * caller-supplied set_wake hook.  The handle_edge_irq arguments to
 * irq_set_chip_and_handler() and the declarations of irq/gpio are in
 * omitted lines.
 */
350 void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
352 struct pxa_gpio_chip *c;
357 /* Initialize GPIO chips */
358 pxa_init_gpio_chip(end);
360 /* clear all GPIO edge detects */
361 for_each_gpio_chip(gpio, c) {
362 __raw_writel(0, c->regbase + GFER_OFFSET);
363 __raw_writel(0, c->regbase + GRER_OFFSET);
364 __raw_writel(~0,c->regbase + GEDR_OFFSET);
367 #ifdef CONFIG_ARCH_PXA
368 irq = gpio_to_irq(0);
369 irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
371 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
372 irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);
374 irq = gpio_to_irq(1);
375 irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
377 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
378 irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
381 for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) {
382 irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
384 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
387 /* Install handler for GPIO>=2 edge detect interrupts */
388 irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler);
389 pxa_muxed_gpio_chip.irq_set_wake = fn;
/*
 * syscore suspend hook: snapshot each bank's level, direction and
 * edge-enable registers into the per-bank shadows, then clear any
 * pending edge status so stale edges cannot fire on resume.  The
 * `gpio` declaration and the `return 0;` are in omitted lines.
 */
393 static int pxa_gpio_suspend(void)
395 struct pxa_gpio_chip *c;
398 for_each_gpio_chip(gpio, c) {
399 c->saved_gplr = __raw_readl(c->regbase + GPLR_OFFSET);
400 c->saved_gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
401 c->saved_grer = __raw_readl(c->regbase + GRER_OFFSET);
402 c->saved_gfer = __raw_readl(c->regbase + GFER_OFFSET);
404 /* Clear GPIO transition detect bits */
405 __raw_writel(0xffffffff, c->regbase + GEDR_OFFSET);
/*
 * syscore resume hook: restore each bank from the saved shadows.  The
 * output levels are replayed through the set/clear registers BEFORE
 * the direction register is restored, so pins come up driving the
 * correct level the moment they become outputs.
 */
410 static void pxa_gpio_resume(void)
412 struct pxa_gpio_chip *c;
415 for_each_gpio_chip(gpio, c) {
416 /* restore level with set/clear */
417 __raw_writel( c->saved_gplr, c->regbase + GPSR_OFFSET);
418 __raw_writel(~c->saved_gplr, c->regbase + GPCR_OFFSET);
420 __raw_writel(c->saved_grer, c->regbase + GRER_OFFSET);
421 __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET);
422 __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET);
/* Stub out the syscore hooks when power management is disabled —
 * presumably the #else of an #ifdef CONFIG_PM not visible in this
 * excerpt; NULL syscore_ops entries are simply skipped. */
426 #define pxa_gpio_suspend NULL
427 #define pxa_gpio_resume NULL
/*
 * Syscore hooks so GPIO state survives system suspend; registered via
 * register_syscore_ops() outside this excerpt.  The struct's closing
 * brace is past the end of this excerpt.
 */
430 struct syscore_ops pxa_gpio_syscore_ops = {
431 .suspend = pxa_gpio_suspend,
432 .resume = pxa_gpio_resume,