kernel/irq/irqdesc.c
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
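
/*
 * Usage sketch (hypothetical demultiplexing driver, not part of this
 * file): a chained handler typically reads its hardware status and
 * feeds each pending child interrupt back through the generic layer:
 *
 *	static void my_gpio_demux_handler(unsigned int irq,
 *					  struct irq_desc *desc)
 *	{
 *		unsigned long pending = readl(base + STATUS);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(my_domain, bit));
 *	}
 *
 * my_gpio_demux_handler, base, STATUS and my_domain are made-up names;
 * irq_find_mapping() is the irqdomain helper that translates a hardware
 * irq number into the Linux irq number expected here.
 */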

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
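
/*
 * Usage sketch: callers normally use the irq_alloc_descs() wrapper from
 * <linux/irq.h>, which supplies THIS_MODULE as @owner. Allocating four
 * consecutive irqs anywhere above the arch lower bound might look like:
 *
 *	int virq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (virq < 0)
 *		return virq;
 *	...
 *	irq_free_descs(virq, 4);
 */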

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
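
/*
 * Usage sketch: note the return convention here is 0 on failure, not a
 * negative errno as in __irq_alloc_descs():
 *
 *	unsigned int irq = irq_alloc_hwirqs(1, numa_node_id());
 *
 *	if (!irq)
 *		return -ENOSPC;
 *	...
 *	irq_free_hwirqs(irq, 1);
 */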

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 *
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
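
/*
 * __irq_get_desc_lock() and __irq_put_desc_unlock() pair up. Callers
 * normally go through the wrappers in internals.h (irq_get_desc_lock(),
 * irq_get_desc_buslock() and friends), in the pattern used throughout
 * kernel/irq/chip.c:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	...
 *	irq_put_desc_unlock(desc, flags);
 */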

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
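
/*
 * Usage sketch (hypothetical per-CPU interrupt driver): the irq must be
 * marked per-CPU before any CPU requests it, after which each CPU gets
 * its own dev_id through the percpu request API:
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, my_handler, "my-percpu-timer",
 *				 my_percpu_dev_id);
 *
 * my_handler and my_percpu_dev_id are made-up names; the dev_id must be
 * a __percpu pointer.
 */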

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
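
/*
 * Usage sketch: the /proc/interrupts code sums these counters one CPU
 * at a time via kstat_irqs_cpu(), roughly:
 *
 *	for_each_online_cpu(cpu)
 *		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 */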