/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/apic.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	if (cpu_has_apic)
		ack_APIC_irq();
#endif
}

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};
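
/*
 * The thread_info member overlays the bottom of the irq stack, exactly
 * like a normal task stack, so the current_thread_info() esp-masking
 * trick keeps working while code runs on the irq stack.
 */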
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif

static void stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
}

static inline void call_on_stack2(void *func, void *stack,
				  unsigned long arg1, unsigned long arg2)
{
	unsigned long bx;

	asm volatile("	xchgl	%%ebx,%%esp	\n"
		     "	call	*%%edi		\n"
		     "	movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (bx)
		     : "0" (arg1), "1" (arg2), "2" (stack),
		       "D" (func)
		     : "memory", "cc", "ecx");
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	/* high bit used in ret_from_ code */
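	/*
	 * The interrupt entry stubs push the vector negated (~vector),
	 * so a negative orig_ax marks an interrupt frame rather than a
	 * syscall; undo that here to recover the vector number.
	 */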
	int irq = ~regs->orig_ax;
	struct irq_desc *desc = irq_desc + irq;
#ifdef CONFIG_4KSTACKS
	union irq_ctx *curctx, *irqctx;
	u32 *isp;
#endif
	int overflow = 0;

	if (unlikely((unsigned)irq >= NR_IRQS)) {
		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
					__func__, irq);
		BUG();
	}

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		long sp;

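		/*
		 * The kernel stack is THREAD_SIZE aligned, so masking
		 * %esp with THREAD_SIZE - 1 yields the offset of the
		 * stack pointer from the bottom of the current stack.
		 */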
		__asm__ __volatile__("andl %%esp,%0" :
					"=r" (sp) : "0" (THREAD_SIZE - 1));
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN)))
			overflow = 1;
	}
#endif

#ifdef CONFIG_4KSTACKS

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (curctx != irqctx) {
		/* build the stack frame on the IRQ stack */
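		/*
		 * Start at the very top of the irq stack: the union is
		 * THREAD_SIZE bytes and the stack grows down towards the
		 * thread_info at its bottom.
		 */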
		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

		/* Execute warning on interrupt stack */
		if (unlikely(overflow))
			call_on_stack2(stack_overflow, isp, 0, 0);

		call_on_stack2(desc->handle_irq, isp, irq, (unsigned long)desc);
	} else
#endif
	{
		/* AK: Slightly bogus here */
		if (unlikely(overflow))
			stack_overflow();
		desc->handle_irq(irq, desc);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

#ifdef CONFIG_4KSTACKS

static char softirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__section__(".bss.page_aligned")));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__section__(".bss.page_aligned")));
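
/*
 * Placing the stacks in .bss.page_aligned keeps each per-CPU
 * THREAD_SIZE slice THREAD_SIZE aligned (THREAD_SIZE == PAGE_SIZE with
 * 4K stacks), which the current_thread_info() esp-masking relies on.
 */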

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.exec_domain = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
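	/*
	 * Pre-biasing preempt_count with HARDIRQ_OFFSET keeps in_irq()
	 * true while code runs on this stack, since current_thread_info()
	 * then resolves to this tinfo rather than the interrupted task's.
	 * The softirq context below starts at 0 so that only the softirq
	 * accounting done by __do_softirq() itself shows up there.
	 */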

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.exec_domain = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = 0;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
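
		/*
		 * Switch %esp to the softirq stack, run __do_softirq()
		 * there, then switch back; the old stack pointer is kept
		 * in %ebx (callee-saved) across the call.
		 */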
232 " xchgl %%ebx,%%esp \n"
233 " call __do_softirq \n"
234 " movl %%ebx,%%esp \n"
237 : "memory", "cc", "edx", "ecx", "eax"
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		unsigned any_count = 0;

		spin_lock_irqsave(&irq_desc[i].lock, flags);
#ifndef CONFIG_SMP
		any_count = kstat_irqs(i);
#else
		for_each_online_cpu(j)
			any_count |= kstat_cpu(j).irqs[i];
#endif
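		/*
		 * Only print a row when a handler is installed or some
		 * CPU has actually seen this interrupt fire.
		 */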
		action = irq_desc[i].action;
		if (!action && !any_count)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %8s", irq_desc[i].chip->name);
		seq_printf(p, "-%-8s", irq_desc[i].name);

		if (action) {
			seq_printf(p, " %s", action->name);
			while ((action = action->next) != NULL)
				seq_printf(p, ", %s", action->name);
		}

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", nmi_count(j));
		seq_printf(p, " Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat,j).apic_timer_irqs);
		seq_printf(p, " Local timer interrupts\n");
#endif
#ifdef CONFIG_SMP
		seq_printf(p, "RES: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat,j).irq_resched_count);
		seq_printf(p, " Rescheduling interrupts\n");
		seq_printf(p, "CAL: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat,j).irq_call_count);
		seq_printf(p, " function call interrupts\n");
		seq_printf(p, "TLB: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat,j).irq_tlb_count);
		seq_printf(p, " TLB shootdowns\n");
#endif
		seq_printf(p, "TRM: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat,j).irq_thermal_count);
		seq_printf(p, " Thermal event interrupts\n");
		seq_printf(p, "SPU: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat,j).irq_spurious_count);
		seq_printf(p, " Spurious interrupts\n");
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;

		if (irq == 2)
			continue;

		cpus_and(mask, irq_desc[irq].affinity, map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

#if 0
	barrier();
	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
	   [note the nop - the interrupt-enable boundary on x86 is two
	   instructions from sti] - to flush out pending hardirqs and
	   IPIs. After this point nothing is supposed to reach this CPU." */
	__asm__ __volatile__("sti; nop; cli");
	barrier();
#else
	/* That doesn't seem sufficient.  Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
#endif
}
#endif