/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */
#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/seq_file.h"
#include "linux/slab.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS)
		seq_putc(p, '\n');

	return 0;
}
/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);
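
/*
 * For orientation, the list element managed here looks roughly like
 * the following sketch.  The real definition lives in the shared
 * "os.h" header, which is authoritative if the two ever disagree:
 *
 *	struct irq_fd {
 *		struct irq_fd *next;
 *		void *id;		(dev_id cookie matched by the free_irq_by_cb tests)
 *		int fd;			(host file descriptor being polled)
 *		int type;		(IRQ_READ or IRQ_WRITE)
 *		int irq;		(guest IRQ number to raise)
 *		int events;		(poll events to wait for)
 *		int current_events;	(events the host reported this round)
 *	};
 */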
void sigio_handler(int sig, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}
static DEFINE_SPINLOCK(irq_lock);

static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .events		= events,
				     .current_events	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means we couldn't put the new pollfd into the
		 * current pollfds, and tmp_pfd is NULL or too small for
		 * the new pollfds array.  The needed size is at least n.
		 *
		 * Here we have to drop the lock in order to call
		 * kmalloc, which might sleep.
		 * If something else came in and changed the pollfds array
		 * so that we cannot put the new pollfd struct into pollfds,
		 * then we free the buffer tmp_pfd and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}
struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}
static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}
/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}
void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
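
/*
 * Taken together, activate_fd/reactivate_fd/deactivate_fd give a
 * driver a poll-style life cycle for a host fd.  A rough sketch
 * (MY_DEV_IRQ and my_intr are illustrative, not defined here):
 *
 *	static irqreturn_t my_intr(int irq, void *dev)
 *	{
 *		... drain the fd ...
 *		reactivate_fd(fd, MY_DEV_IRQ);	(re-arm polling)
 *		return IRQ_HANDLED;
 *	}
 *
 * Re-arming is needed because a fd's pollfd slot is disabled once it
 * fires; deactivate_fd disables it for good when the device goes away.
 */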
/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * already died.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

	return 0;
}
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
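
/*
 * A minimal usage sketch, with hypothetical names (MY_DEV_IRQ,
 * my_intr, and dev are not defined in this file):
 *
 *	err = um_request_irq(MY_DEV_IRQ, fd, IRQ_READ, my_intr,
 *			     IRQF_SHARED, "my_dev", dev);
 *	if (err < 0)
 *		printk(KERN_ERR "my_dev: um_request_irq failed, err = %d\n",
 *		       err);
 *
 * Passing fd == -1 skips activate_fd entirely, making this equivalent
 * to plain request_irq for IRQ sources with no backing descriptor.
 */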
/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
};
void __init init_IRQ(void)
{
	int i;

	set_irq_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++) {
		set_irq_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	}
}
/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_NODEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.  If the
 * value that comes back is zero, then there is no setup in progress,
 * and the interrupt can be handled.  If the value is non-zero, then
 * there is stack setup in progress.  In order to have the interrupt
 * handled, we leave our signal in the mask, and it will be handled by
 * the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
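
/*
 * A concrete walk-through, assuming two racing signals whose mask bits
 * are 1 and 2 (the values are illustrative): handler A xchgs 1 into
 * pending_mask and reads back 0, so no setup is in progress and A
 * starts copying the thread_info.  B arrives mid-copy, xchgs 2 in and
 * reads back A's 1.  Non-zero, so B merges the two bits, xchgs 3 in,
 * and loops until xchg hands back exactly what it last wrote; then it
 * returns without handling anything.  When A finishes setup, its final
 * xchg(&pending_mask, 0) picks up B's bit as well, so B's interrupt is
 * handled by A on the now-ready stack.
 */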
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	/*
	 * Hold a sentinel bit in pending_mask while the thread_info is
	 * copied back, so any signal arriving now defers itself instead
	 * of running on a half-restored stack.
	 */
	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;	/* bits left by racing signals, minus the sentinel */
}