Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 8 Jun 2011 02:21:11 +0000 (19:21 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 8 Jun 2011 02:21:11 +0000 (19:21 -0700)
* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  genirq: Ensure we locate the passed IRQ in irq_alloc_descs()
  genirq: Fix descriptor init on non-sparse IRQs
  irq: Handle spurious irq detection for threaded irqs
  genirq: Print threaded handler in spurious debug output

include/linux/irqreturn.h
kernel/irq/handle.c
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/spurious.c

diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 819acaa..714ba08 100644
@@ -8,9 +8,9 @@
  * @IRQ_WAKE_THREAD    handler requests to wake the handler thread
  */
 enum irqreturn {
-       IRQ_NONE,
-       IRQ_HANDLED,
-       IRQ_WAKE_THREAD,
+       IRQ_NONE                = (0 << 0),
+       IRQ_HANDLED             = (1 << 0),
+       IRQ_WAKE_THREAD         = (1 << 1),
 };
 
 typedef enum irqreturn irqreturn_t;
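
Re-encoding the return codes as individual bits means the values can be
OR-combined without losing information, which the rest of this series relies
on. A purely illustrative sketch, not part of the diff:

	irqreturn_t retval = IRQ_NONE;

	retval |= IRQ_HANDLED;		/* one shared handler handled it        */
	retval |= IRQ_WAKE_THREAD;	/* another handed the work to a thread  */

	/* Both facts are still visible in retval; with the old sequential
	 * enum values (0, 1, 2) this combination would be ambiguous. */
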
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 90cb55f..470d08c 100644
@@ -132,12 +132,6 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 
                switch (res) {
                case IRQ_WAKE_THREAD:
-                       /*
-                        * Set result to handled so the spurious check
-                        * does not trigger.
-                        */
-                       res = IRQ_HANDLED;
-
                        /*
                         * Catch drivers which return WAKE_THREAD but
                         * did not set up a thread function
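
With the explicit rewrite to IRQ_HANDLED gone, the IRQ_WAKE_THREAD value
survives into the combined return of handle_irq_event_percpu();
note_interrupt() (see the kernel/irq/spurious.c hunk below) recognizes it and
leaves the spurious accounting to the threaded handler's own result. A
condensed, non-verbatim sketch of the surrounding loop, assuming the results
are OR-ed together as in the mainline function:

	irqreturn_t retval = IRQ_NONE;

	do {
		irqreturn_t res = action->handler(irq, action->dev_id);

		/* IRQ_WAKE_THREAD: the thread is woken here (details
		 * omitted); res is no longer rewritten to IRQ_HANDLED,
		 * so the caller can see that work was handed off. */
		retval |= res;
		action = action->next;
	} while (action);
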
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 886e803..4c60a50 100644
@@ -257,13 +257,11 @@ int __init early_irq_init(void)
        count = ARRAY_SIZE(irq_desc);
 
        for (i = 0; i < count; i++) {
-               desc[i].irq_data.irq = i;
-               desc[i].irq_data.chip = &no_irq_chip;
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
-               irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
-               alloc_masks(desc + i, GFP_KERNEL, node);
-               desc_smp_init(desc + i, node);
+               alloc_masks(&desc[i], GFP_KERNEL, node);
+               raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+               desc_set_defaults(i, &desc[i], node);
        }
        return arch_early_irq_init();
 }
@@ -346,6 +344,12 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
        if (!cnt)
                return -EINVAL;
 
+       if (irq >= 0) {
+               if (from > irq)
+                       return -EINVAL;
+               from = irq;
+       }
+
        mutex_lock(&sparse_irq_lock);
 
        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
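
The added check guarantees that an explicitly requested IRQ number is inside
the search window: from is raised to irq, and a from beyond irq is rejected
instead of silently allocating a different range. A hypothetical caller (IRQ
number, count and error handling are made up for illustration):

	/* Ask for 4 consecutive descriptors anchored exactly at IRQ 16.
	 * Passing from == 0 is fine (it is bumped up to 16); passing
	 * from == 20 now returns -EINVAL rather than some other range. */
	int base = irq_alloc_descs(16, 0, 4, numa_node_id());

	if (base < 0)
		pr_err("irq descriptor allocation failed: %d\n", base);
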
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f7ce002..d64bafb 100644
@@ -723,13 +723,16 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  * context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
  */
-static void
+static irqreturn_t
 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 {
+       irqreturn_t ret;
+
        local_bh_disable();
-       action->thread_fn(action->irq, action->dev_id);
+       ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        local_bh_enable();
+       return ret;
 }
 
 /*
@@ -737,10 +740,14 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
  * preemtible - many of them need to sleep and wait for slow busses to
  * complete.
  */
-static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+               struct irqaction *action)
 {
-       action->thread_fn(action->irq, action->dev_id);
+       irqreturn_t ret;
+
+       ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
+       return ret;
 }
 
 /*
@@ -753,7 +760,8 @@ static int irq_thread(void *data)
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
-       void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+       irqreturn_t (*handler_fn)(struct irq_desc *desc,
+                       struct irqaction *action);
        int wake;
 
        if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
@@ -783,8 +791,12 @@ static int irq_thread(void *data)
                        desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
+                       irqreturn_t action_ret;
+
                        raw_spin_unlock_irq(&desc->lock);
-                       handler_fn(desc, action);
+                       action_ret = handler_fn(desc, action);
+                       if (!noirqdebug)
+                               note_interrupt(action->irq, desc, action_ret);
                }
 
                wake = atomic_dec_and_test(&desc->threads_active);
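
Since the threaded handler's return value is now fed into note_interrupt()
(when noirqdebug is off), drivers are expected to report their thread results
honestly: IRQ_WAKE_THREAD from the hard handler is ignored by the spurious
logic, and the verdict comes from the thread function. A hypothetical driver
pair, with all foo_* names invented for illustration:

	static irqreturn_t foo_hard_handler(int irq, void *dev_id)
	{
		struct foo_dev *foo = dev_id;

		if (!foo_irq_pending(foo))
			return IRQ_NONE;	/* counted as unhandled */

		foo_mask_irq(foo);
		return IRQ_WAKE_THREAD;		/* spurious logic waits for the thread */
	}

	static irqreturn_t foo_thread_handler(int irq, void *dev_id)
	{
		struct foo_dev *foo = dev_id;

		foo_process_events(foo);
		foo_unmask_irq(foo);
		return IRQ_HANDLED;		/* now checked by note_interrupt() */
	}
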
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dfbd550..aa57d5d 100644
@@ -167,6 +167,13 @@ out:
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+       if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+               return 0;
+       return 1;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
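
With the bit encoding from include/linux/irqreturn.h, every legitimate
combination of IRQ_NONE, IRQ_HANDLED and IRQ_WAKE_THREAD falls in the range
0..3, so bad_action_ret() only has to check the upper bound. The concrete
classification (illustrative listing, not part of the diff):

	/*
	 *  IRQ_NONE                       (0)  -> accepted
	 *  IRQ_HANDLED                    (1)  -> accepted
	 *  IRQ_WAKE_THREAD                (2)  -> accepted
	 *  IRQ_HANDLED | IRQ_WAKE_THREAD  (3)  -> accepted
	 *  anything larger (e.g. a stray errno) -> "bogus return value" report
	 */
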
@@ -182,7 +189,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
        struct irqaction *action;
        unsigned long flags;
 
-       if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+       if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
@@ -201,10 +208,11 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
        raw_spin_lock_irqsave(&desc->lock, flags);
        action = desc->action;
        while (action) {
-               printk(KERN_ERR "[<%p>]", action->handler);
-               print_symbol(" (%s)",
-                       (unsigned long)action->handler);
-               printk("\n");
+               printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
+               if (action->thread_fn)
+                       printk(KERN_CONT " threaded [<%p>] %pf",
+                                       action->thread_fn, action->thread_fn);
+               printk(KERN_CONT "\n");
                action = action->next;
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -262,7 +270,16 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
        if (desc->istate & IRQS_POLL_INPROGRESS)
                return;
 
-       if (unlikely(action_ret != IRQ_HANDLED)) {
+       /* we get here again via the threaded handler */
+       if (action_ret == IRQ_WAKE_THREAD)
+               return;
+
+       if (bad_action_ret(action_ret)) {
+               report_bad_irq(irq, desc, action_ret);
+               return;
+       }
+
+       if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
                 * bus asynchronicity then don't eventually trigger an error,
@@ -274,8 +291,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
-               if (unlikely(action_ret != IRQ_NONE))
-                       report_bad_irq(irq, desc, action_ret);
        }
 
        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {