/*
 * Intel IO-APIC support for multi-Pentium hosts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 * Many thanks to Stig Venaas for trying out countless experimental
 * patches and reporting/debugging problems patiently!
 *
 * (c) 1999, Multiple IO-APIC support, developed by
 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 * further tested and cleaned up by Zach Brown <zab@redhat.com>
 * and Ingo Molnar <mingo@redhat.com>
 *
 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
 *                     thanks to Eric Gilmore
 *                     for testing these extensively
 * Paul Diefenbaugh  : Added full ACPI support
 */

#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>      /* time_after() */
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/proto.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hw_irq.h>

#define __apicdebuginit(type) static type __init

#define for_each_irq_pin(entry, head) \
        for (entry = head; entry; entry = entry->next)
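/*
 * Note: this walks the singly linked irq_2_pin list that
 * __add_pin_to_irq_node() builds further down. Illustrative use only:
 *
 *      struct irq_pin_list *entry;
 *      for_each_irq_pin(entry, cfg->irq_2_pin)
 *              do_something(entry->apic, entry->pin);
 */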
/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];

/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

static int nr_irqs_gsi = NR_IRQS_LEGACY;

#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

void arch_disable_smp_support(void)
{
#ifdef CONFIG_PCI
        noioapicreroute = -1;
#endif
        skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
        /* disable IO-APIC */
        arch_disable_smp_support();
        return 0;
}
early_param("noapic", parse_noapic);

struct irq_pin_list {
        int apic, pin;
        struct irq_pin_list *next;
};

static struct irq_pin_list *alloc_irq_pin_list(int node)
{
        return kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
}

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

int __init arch_early_irq_init(void)
{
        struct irq_cfg *cfg;
        int count, node, i;

        if (!legacy_pic->nr_legacy_irqs) {
                nr_irqs_gsi = 0;
                io_apic_irqs = ~0UL;
        }

        cfg = irq_cfgx;
        count = ARRAY_SIZE(irq_cfgx);
        node = cpu_to_node(0);

        /* Make sure the legacy interrupts are marked in the bitmap */
        irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

        for (i = 0; i < count; i++) {
                set_irq_chip_data(i, &cfg[i]);
                zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
                zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
                /*
                 * For legacy IRQs, start with assigning irq0 to irq15 to
                 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
                 */
                if (i < legacy_pic->nr_legacy_irqs) {
                        cfg[i].vector = IRQ0_VECTOR + i;
                        cpumask_set_cpu(0, cfg[i].domain);
                }
        }

        return 0;
}

#ifdef CONFIG_SPARSE_IRQ
struct irq_cfg *irq_cfg(unsigned int irq)
{
        return get_irq_chip_data(irq);
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
        struct irq_cfg *cfg;

        cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
        if (!cfg)
                return NULL;
        if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node))
                goto out_cfg;
        if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_ATOMIC, node))
                goto out_domain;
        return cfg;
out_domain:
        free_cpumask_var(cfg->domain);
out_cfg:
        kfree(cfg);
        return NULL;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
        if (!cfg)
                return;
        set_irq_chip_data(at, NULL);
        free_cpumask_var(cfg->domain);
        free_cpumask_var(cfg->old_domain);
        kfree(cfg);
}

#else

struct irq_cfg *irq_cfg(unsigned int irq)
{
        return irq < nr_irqs ? irq_cfgx + irq : NULL;
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
        return irq_cfgx + irq;
}

static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }

#endif

static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
        int res = irq_alloc_desc_at(at, node);
        struct irq_cfg *cfg;

        if (res < 0) {
                if (res != -EEXIST)
                        return NULL;
                cfg = get_irq_chip_data(at);
                if (cfg)
                        return cfg;
        }

        cfg = alloc_irq_cfg(at, node);
        if (cfg)
                set_irq_chip_data(at, cfg);
        else
                irq_free_desc(at);
        return cfg;
}

static int alloc_irq_from(unsigned int from, int node)
{
        return irq_alloc_desc_from(from, node);
}

static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
        free_irq_cfg(at, cfg);
        irq_free_desc(at);
}

struct io_apic {
        unsigned int index;
        unsigned int unused[3];
        unsigned int data;
        unsigned int unused2[11];
        unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
        return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
                + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}
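/*
 * Note: each IO-APIC's MMIO window is mapped at boot into a dedicated
 * fixmap slot (FIX_IO_APIC_BASE_0 + idx); the sub-page bits of the
 * physical base (apicaddr & ~PAGE_MASK) are added back so that the
 * registers land at the right offset inside the mapped page.
 */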
static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);
        writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);
        writel(reg, &io_apic->index);
        return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);
        writel(reg, &io_apic->index);
        writel(value, &io_apic->data);
}
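/*
 * The IO-APIC exposes an indirect register window: software writes a
 * register number into the index register (offset 0x00) and then moves
 * the 32-bit value through the data register (offset 0x10). For example,
 * register 0x01 carries the version in bits 0-7 and the number of
 * redirection entries minus one in bits 16-23:
 *
 *      unsigned int ver = io_apic_read(apic, 1) & 0xff;
 */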
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APICs require us to rewrite the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);

        if (sis_apic_bug)
                writel(reg, &io_apic->index);
        writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
        struct irq_pin_list *entry;
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        for_each_irq_pin(entry, cfg->irq_2_pin) {
                unsigned int reg;
                int pin;

                pin = entry->pin;
                reg = io_apic_read(entry->apic, 0x10 + pin*2);
                /* Is the remote IRR bit set? */
                if (reg & IO_APIC_REDIR_REMOTE_IRR) {
                        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
                        return true;
                }
        }
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);

        return false;
}

union entry_union {
        struct { u32 w1, w2; };
        struct IO_APIC_route_entry entry;
};
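/*
 * A redirection table entry (RTE) is 64 bits wide, accessed as two
 * 32-bit halves: register 0x10 + 2*pin holds the low word (vector,
 * delivery mode, mask, trigger, polarity) and 0x11 + 2*pin holds the
 * high word (destination). This union lets the code move between the
 * raw words and the structured view.
 */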
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
        union entry_union eu;
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
        eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);

        return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
        union entry_union eu = {{0, 0}};

        eu.entry = e;
        io_apic_write(apic, 0x11 + 2*pin, eu.w2);
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        __ioapic_write_entry(apic, pin, e);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * rest of the word.
 */
static void ioapic_mask_entry(int apic, int pin)
{
        unsigned long flags;
        union entry_union eu = { .entry.mask = 1 };

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
        io_apic_write(apic, 0x11 + 2*pin, eu.w2);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
        struct irq_pin_list **last, *entry;

        /* don't allow duplicates */
        last = &cfg->irq_2_pin;
        for_each_irq_pin(entry, cfg->irq_2_pin) {
                if (entry->apic == apic && entry->pin == pin)
                        return 0;
                last = &entry->next;
        }

        entry = alloc_irq_pin_list(node);
        if (!entry) {
                printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
                       node, apic, pin);
                return -ENOMEM;
        }
        entry->apic = apic;
        entry->pin = pin;

        *last = entry;
        return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
        if (__add_pin_to_irq_node(cfg, node, apic, pin))
                panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
                                           int oldapic, int oldpin,
                                           int newapic, int newpin)
{
        struct irq_pin_list *entry;

        for_each_irq_pin(entry, cfg->irq_2_pin) {
                if (entry->apic == oldapic && entry->pin == oldpin) {
                        entry->apic = newapic;
                        entry->pin = newpin;
                        /* every one is different, right? */
                        return;
                }
        }

        /* old apic/pin didn't exist, so just add new ones */
        add_pin_to_irq_node(cfg, node, newapic, newpin);
}

static void __io_apic_modify_irq(struct irq_pin_list *entry,
                                 int mask_and, int mask_or,
                                 void (*final)(struct irq_pin_list *entry))
{
        unsigned int reg, pin;

        pin = entry->pin;
        reg = io_apic_read(entry->apic, 0x10 + pin * 2);
        reg &= mask_and;
        reg |= mask_or;
        io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
        if (final)
                final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
                               int mask_and, int mask_or,
                               void (*final)(struct irq_pin_list *entry))
{
        struct irq_pin_list *entry;

        for_each_irq_pin(entry, cfg->irq_2_pin)
                __io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
        __io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
                             IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
        __io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
                             IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
        /*
         * Synchronize the IO-APIC and the CPU by doing
         * a dummy read from the IO-APIC
         */
        struct io_apic __iomem *io_apic;
        io_apic = io_apic_base(entry->apic);
        readl(&io_apic->data);
}

static void mask_ioapic(struct irq_cfg *cfg)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_ioapic_irq(struct irq_data *data)
{
        mask_ioapic(data->chip_data);
}

static void __unmask_ioapic(struct irq_cfg *cfg)
{
        io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void unmask_ioapic(struct irq_cfg *cfg)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        __unmask_ioapic(cfg);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_ioapic_irq(struct irq_data *data)
{
        unmask_ioapic(data->chip_data);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
        struct IO_APIC_route_entry entry;

        /* Check delivery_mode to be sure we're not clearing an SMI pin */
        entry = ioapic_read_entry(apic, pin);
        if (entry.delivery_mode == dest_SMI)
                return;
        /*
         * Disable it in the IO-APIC irq-routing table:
         */
        ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC(void)
{
        int apic, pin;

        for (apic = 0; apic < nr_ioapics; apic++)
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
                        clear_IO_APIC_pin(apic, pin);
}
#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSes, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
        [0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
        int i, max;
        int ints[MAX_PIRQS+1];

        get_options(str, ARRAY_SIZE(ints), ints);

        apic_printk(APIC_VERBOSE, KERN_INFO
                        "PIRQ redirection, working around broken MP-BIOS.\n");
        max = MAX_PIRQS;
        if (ints[0] < MAX_PIRQS)
                max = ints[0];

        for (i = 0; i < max; i++) {
                apic_printk(APIC_VERBOSE, KERN_DEBUG
                                "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
                /*
                 * PIRQs are mapped upside down, usually.
                 */
                pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
        }
        return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */

struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
        int apic;
        struct IO_APIC_route_entry **ioapic_entries;

        ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
                                 GFP_ATOMIC);
        if (!ioapic_entries)
                return 0;

        for (apic = 0; apic < nr_ioapics; apic++) {
                ioapic_entries[apic] =
                        kzalloc(sizeof(struct IO_APIC_route_entry) *
                                nr_ioapic_registers[apic], GFP_ATOMIC);
                if (!ioapic_entries[apic])
                        goto nomem;
        }

        return ioapic_entries;

nomem:
        while (--apic >= 0)
                kfree(ioapic_entries[apic]);
        kfree(ioapic_entries);

        return 0;
}

/*
 * Saves all the IO-APIC RTEs.
 */
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
        int apic, pin;

        if (!ioapic_entries)
                return -ENOMEM;

        for (apic = 0; apic < nr_ioapics; apic++) {
                if (!ioapic_entries[apic])
                        return -ENOMEM;

                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
                        ioapic_entries[apic][pin] =
                                ioapic_read_entry(apic, pin);
        }

        return 0;
}

/*
 * Mask all IO APIC entries.
 */
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
        int apic, pin;

        if (!ioapic_entries)
                return;

        for (apic = 0; apic < nr_ioapics; apic++) {
                if (!ioapic_entries[apic])
                        break;

                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
                        struct IO_APIC_route_entry entry;

                        entry = ioapic_entries[apic][pin];
                        if (!entry.mask) {
                                entry.mask = 1;
                                ioapic_write_entry(apic, pin, entry);
                        }
                }
        }
}

/*
 * Restore IO APIC entries which were saved in ioapic_entries.
 */
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
        int apic, pin;

        if (!ioapic_entries)
                return -ENOMEM;

        for (apic = 0; apic < nr_ioapics; apic++) {
                if (!ioapic_entries[apic])
                        return -ENOMEM;

                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
                        ioapic_write_entry(apic, pin,
                                           ioapic_entries[apic][pin]);
        }
        return 0;
}

void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{
        int apic;

        for (apic = 0; apic < nr_ioapics; apic++)
                kfree(ioapic_entries[apic]);

        kfree(ioapic_entries);
}

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
        int i;

        for (i = 0; i < mp_irq_entries; i++)
                if (mp_irqs[i].irqtype == type &&
                    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
                     mp_irqs[i].dstapic == MP_APIC_ALL) &&
                    mp_irqs[i].dstirq == pin)
                        return i;

        return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
        int i;

        for (i = 0; i < mp_irq_entries; i++) {
                int lbus = mp_irqs[i].srcbus;

                if (test_bit(lbus, mp_bus_not_pci) &&
                    (mp_irqs[i].irqtype == type) &&
                    (mp_irqs[i].srcbusirq == irq))
                        return mp_irqs[i].dstirq;
        }
        return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
        int i;

        for (i = 0; i < mp_irq_entries; i++) {
                int lbus = mp_irqs[i].srcbus;

                if (test_bit(lbus, mp_bus_not_pci) &&
                    (mp_irqs[i].irqtype == type) &&
                    (mp_irqs[i].srcbusirq == irq))
                        break;
        }
        if (i < mp_irq_entries) {
                int apic;
                for (apic = 0; apic < nr_ioapics; apic++) {
                        if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
                                return apic;
                }
        }

        return -1;
}

#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
        if (irq < legacy_pic->nr_legacy_irqs) {
                unsigned int port = 0x4d0 + (irq >> 3);
                return (inb(port) >> (irq & 7)) & 1;
        }
        apic_printk(APIC_VERBOSE, KERN_INFO
                        "Broken MPtable reports ISA irq %d\n", irq);
        return 0;
}
#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)        (0)
#define default_ISA_polarity(idx)       (0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value. If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR. */

#define default_EISA_trigger(idx)       (EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)      default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)        (1)
#define default_PCI_polarity(idx)       (1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)        (1)
#define default_MCA_polarity(idx)       default_ISA_polarity(idx)
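/*
 * For reference: the MP table encodes these attributes in the low
 * nibble of mp_irqs[].irqflag. Bits 0-1 give the polarity
 * (00 = conforms to bus, 01 = active high, 10 = reserved,
 * 11 = active low) and bits 2-3 give the trigger mode
 * (00 = conforms to bus, 01 = edge, 10 = reserved, 11 = level),
 * which is exactly what MPBIOS_polarity() and MPBIOS_trigger()
 * below decode.
 */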
static int MPBIOS_polarity(int idx)
{
        int bus = mp_irqs[idx].srcbus;
        int polarity;

        /*
         * Determine IRQ line polarity (high active or low active):
         */
        switch (mp_irqs[idx].irqflag & 3)
        {
                case 0: /* conforms, ie. bus-type dependent polarity */
                        if (test_bit(bus, mp_bus_not_pci))
                                polarity = default_ISA_polarity(idx);
                        else
                                polarity = default_PCI_polarity(idx);
                        break;
                case 1: /* high active */
                {
                        polarity = 0;
                        break;
                }
                case 2: /* reserved */
                {
                        printk(KERN_WARNING "broken BIOS!!\n");
                        polarity = 1;
                        break;
                }
                case 3: /* low active */
                {
                        polarity = 1;
                        break;
                }
                default: /* invalid */
                {
                        printk(KERN_WARNING "broken BIOS!!\n");
                        polarity = 1;
                        break;
                }
        }
        return polarity;
}

static int MPBIOS_trigger(int idx)
{
        int bus = mp_irqs[idx].srcbus;
        int trigger;

        /*
         * Determine IRQ trigger mode (edge or level sensitive):
         */
        switch ((mp_irqs[idx].irqflag>>2) & 3)
        {
                case 0: /* conforms, ie. bus-type dependent */
                        if (test_bit(bus, mp_bus_not_pci))
                                trigger = default_ISA_trigger(idx);
                        else
                                trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
                        switch (mp_bus_id_to_type[bus]) {
                                case MP_BUS_ISA: /* ISA pin */
                                {
                                        /* set before the switch */
                                        break;
                                }
                                case MP_BUS_EISA: /* EISA pin */
                                {
                                        trigger = default_EISA_trigger(idx);
                                        break;
                                }
                                case MP_BUS_PCI: /* PCI pin */
                                {
                                        /* set before the switch */
                                        break;
                                }
                                case MP_BUS_MCA: /* MCA pin */
                                {
                                        trigger = default_MCA_trigger(idx);
                                        break;
                                }
                                default:
                                {
                                        printk(KERN_WARNING "broken BIOS!!\n");
                                        trigger = 1;
                                        break;
                                }
                        }
#endif
                        break;
                case 1: /* edge */
                {
                        trigger = 0;
                        break;
                }
                case 2: /* reserved */
                {
                        printk(KERN_WARNING "broken BIOS!!\n");
                        trigger = 1;
                        break;
                }
                case 3: /* level */
                {
                        trigger = 1;
                        break;
                }
                default: /* invalid */
                {
                        printk(KERN_WARNING "broken BIOS!!\n");
                        trigger = 0;
                        break;
                }
        }
        return trigger;
}

static inline int irq_polarity(int idx)
{
        return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
        return MPBIOS_trigger(idx);
}

static int pin_2_irq(int idx, int apic, int pin)
{
        int irq;
        int bus = mp_irqs[idx].srcbus;

        /*
         * Debugging check, we are in big trouble if this message pops up!
         */
        if (mp_irqs[idx].dstirq != pin)
                printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

        if (test_bit(bus, mp_bus_not_pci)) {
                irq = mp_irqs[idx].srcbusirq;
        } else {
                u32 gsi = mp_gsi_routing[apic].gsi_base + pin;

                if (gsi >= NR_IRQS_LEGACY)
                        irq = gsi;
                else
                        irq = gsi_top + gsi;
        }

#ifdef CONFIG_X86_32
        /*
         * PCI IRQ command line redirection. Yes, limits are hardcoded.
         */
        if ((pin >= 16) && (pin <= 23)) {
                if (pirq_entries[pin-16] != -1) {
                        if (!pirq_entries[pin-16]) {
                                apic_printk(APIC_VERBOSE, KERN_DEBUG
                                                "disabling PIRQ%d\n", pin-16);
                        } else {
                                irq = pirq_entries[pin-16];
                                apic_printk(APIC_VERBOSE, KERN_DEBUG
                                                "using PIRQ%d -> IRQ %d\n",
                                                pin-16, irq);
                        }
                }
        }
#endif

        return irq;
}

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
                               struct io_apic_irq_attr *irq_attr)
{
        int apic, i, best_guess = -1;

        apic_printk(APIC_DEBUG,
                    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
                    bus, slot, pin);
        if (test_bit(bus, mp_bus_not_pci)) {
                apic_printk(APIC_VERBOSE,
                            "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
                return -1;
        }
        for (i = 0; i < mp_irq_entries; i++) {
                int lbus = mp_irqs[i].srcbus;

                for (apic = 0; apic < nr_ioapics; apic++)
                        if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
                            mp_irqs[i].dstapic == MP_APIC_ALL)
                                break;

                if (!test_bit(lbus, mp_bus_not_pci) &&
                    !mp_irqs[i].irqtype &&
                    (bus == lbus) &&
                    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
                        int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

                        if (!(apic || IO_APIC_IRQ(irq)))
                                continue;

                        if (pin == (mp_irqs[i].srcbusirq & 3)) {
                                set_io_apic_irq_attr(irq_attr, apic,
                                                     mp_irqs[i].dstirq,
                                                     irq_trigger(i),
                                                     irq_polarity(i));
                                return irq;
                        }
                        /*
                         * Use the first all-but-pin matching entry as a
                         * best-guess fuzzy result for broken mptables.
                         */
                        if (best_guess < 0) {
                                set_io_apic_irq_attr(irq_attr, apic,
                                                     mp_irqs[i].dstirq,
                                                     irq_trigger(i),
                                                     irq_polarity(i));
                                best_guess = irq;
                        }
                }
        }
        return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
        /* Used so that the online set of cpus does not change
         * during assign_irq_vector.
         */
        raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
        raw_spin_unlock(&vector_lock);
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
        /*
         * NOTE! The local APIC isn't very good at handling
         * multiple interrupts at the same interrupt level.
         * As the interrupt level is determined by taking the
         * vector number and shifting that right by 4, we
         * want to spread these out a bit so that they don't
         * all fall in the same interrupt level.
         *
         * Also, we've got to be careful not to trash gate
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
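        /*
         * Concretely: the priority level of a vector is (vector >> 4),
         * i.e. 16 vectors per level. By advancing the search in steps
         * of 8 and rotating the offset through 0..7 when the range
         * wraps, successively allocated vectors land in different
         * priority levels instead of piling up in one.
         */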
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 8;
        unsigned int old_vector;
        int cpu, err;
        cpumask_var_t tmp_mask;

        if (cfg->move_in_progress)
                return -EBUSY;

        if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
                return -ENOMEM;

        old_vector = cfg->vector;
        if (old_vector) {
                cpumask_and(tmp_mask, mask, cpu_online_mask);
                cpumask_and(tmp_mask, cfg->domain, tmp_mask);
                if (!cpumask_empty(tmp_mask)) {
                        free_cpumask_var(tmp_mask);
                        return 0;
                }
        }

        /* Only try and allocate irqs on cpus that are present */
        err = -ENOSPC;
        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                int new_cpu;
                int vector, offset;

                apic->vector_allocation_domain(cpu, tmp_mask);

                vector = current_vector;
                offset = current_offset;
next:
                vector += 8;
                if (vector >= first_system_vector) {
                        /* If out of vectors on large boxen, must share them. */
                        offset = (offset + 1) % 8;
                        vector = FIRST_EXTERNAL_VECTOR + offset;
                }
                if (unlikely(current_vector == vector))
                        continue;

                if (test_bit(vector, used_vectors))
                        goto next;

                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                        if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                                goto next;
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
                if (old_vector) {
                        cfg->move_in_progress = 1;
                        cpumask_copy(cfg->old_domain, cfg->domain);
                }
                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
                cfg->vector = vector;
                cpumask_copy(cfg->domain, tmp_mask);
                err = 0;
                break;
        }
        free_cpumask_var(tmp_mask);
        return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
        int err;
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        err = __assign_irq_vector(irq, cfg, mask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return err;
}

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
        int cpu, vector;

        BUG_ON(!cfg->vector);

        vector = cfg->vector;
        for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
                per_cpu(vector_irq, cpu)[vector] = -1;

        cfg->vector = 0;
        cpumask_clear(cfg->domain);

        if (likely(!cfg->move_in_progress))
                return;
        for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                                                                vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] != irq)
                                continue;
                        per_cpu(vector_irq, cpu)[vector] = -1;
                        break;
                }
        }
        cfg->move_in_progress = 0;
}

void __setup_vector_irq(int cpu)
{
        /* Initialize vector_irq on a new cpu */
        int irq, vector;
        struct irq_cfg *cfg;
        struct irq_desc *desc;

        /*
         * vector_lock will make sure that we don't run into irq vector
         * assignments that might be happening on another cpu in parallel,
         * while we set up our initial vector to irq mappings.
         */
        raw_spin_lock(&vector_lock);
        /* Mark the inuse vectors */
        for_each_irq_desc(irq, desc) {
                cfg = get_irq_desc_chip_data(desc);

                /*
                 * If it is a legacy IRQ handled by the legacy PIC, this cpu
                 * will be part of the irq_cfg's domain.
                 */
                if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
                        cpumask_set_cpu(cpu, cfg->domain);

                if (!cpumask_test_cpu(cpu, cfg->domain))
                        continue;
                vector = cfg->vector;
                per_cpu(vector_irq, cpu)[vector] = irq;
        }
        /* Mark the free vectors */
        for (vector = 0; vector < NR_VECTORS; ++vector) {
                irq = per_cpu(vector_irq, cpu)[vector];
                if (irq < 0)
                        continue;

                cfg = irq_cfg(irq);
                if (!cpumask_test_cpu(cpu, cfg->domain))
                        per_cpu(vector_irq, cpu)[vector] = -1;
        }
        raw_spin_unlock(&vector_lock);
}

static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

#define IOAPIC_AUTO     -1
#define IOAPIC_EDGE     0
#define IOAPIC_LEVEL    1

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
        int apic, idx, pin;

        for (apic = 0; apic < nr_ioapics; apic++) {
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
                        idx = find_irq_entry(apic, pin, mp_INT);
                        if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
                                return irq_trigger(idx);
                }
        }
        /*
         * nonexistent IRQs are edge default
         */
        return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
        return 1;
}
#endif

static void ioapic_register_intr(unsigned int irq, unsigned long trigger)
{
        if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
            trigger == IOAPIC_LEVEL)
                irq_set_status_flags(irq, IRQ_LEVEL);
        else
                irq_clear_status_flags(irq, IRQ_LEVEL);

        if (irq_remapped(get_irq_chip_data(irq))) {
                irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
                if (trigger)
                        set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
                                                      handle_fasteoi_irq,
                                                      "fasteoi");
                else
                        set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
                                                      handle_edge_irq, "edge");
                return;
        }

        if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
            trigger == IOAPIC_LEVEL)
                set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                              handle_fasteoi_irq,
                                              "fasteoi");
        else
                set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                              handle_edge_irq, "edge");
}
int setup_ioapic_entry(int apic_id, int irq,
                       struct IO_APIC_route_entry *entry,
                       unsigned int destination, int trigger,
                       int polarity, int vector, int pin)
{
        /*
         * add it to the IO-APIC irq-routing table:
         */
        memset(entry, 0, sizeof(*entry));

        if (intr_remapping_enabled) {
                struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
                struct irte irte;
                struct IR_IO_APIC_route_entry *ir_entry =
                        (struct IR_IO_APIC_route_entry *) entry;
                int index;

                if (!iommu)
                        panic("No mapping iommu for ioapic %d\n", apic_id);

                index = alloc_irte(iommu, irq, 1);
                if (index < 0)
                        panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

                prepare_irte(&irte, vector, destination);

                /* Set source-id of interrupt request */
                set_ioapic_sid(&irte, apic_id);

                modify_irte(irq, &irte);

                ir_entry->index2 = (index >> 15) & 0x1;
                ir_entry->zero = 0;
                ir_entry->format = 1;
                ir_entry->index = (index & 0x7fff);
                /*
                 * IO-APIC RTE will be configured with virtual vector.
                 * irq handler will do the explicit EOI to the io-apic.
                 */
                ir_entry->vector = pin;
        } else {
                entry->delivery_mode = apic->irq_delivery_mode;
                entry->dest_mode = apic->irq_dest_mode;
                entry->dest = destination;
                entry->vector = vector;
        }

        entry->mask = 0;                        /* enable IRQ */
        entry->trigger = trigger;
        entry->polarity = polarity;

        /* Mask level triggered irqs.
         * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
         */
        if (trigger)
                entry->mask = 1;
        return 0;
}

static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
                             struct irq_cfg *cfg, int trigger, int polarity)
{
        struct IO_APIC_route_entry entry;
        unsigned int dest;

        if (!IO_APIC_IRQ(irq))
                return;
        /*
         * For legacy irqs, cfg->domain starts with cpu 0 for legacy
         * controllers like 8259. Now that IO-APIC can handle this irq, update
         * the cfg->domain.
         */
        if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
                apic->vector_allocation_domain(0, cfg->domain);

        if (assign_irq_vector(irq, cfg, apic->target_cpus()))
                return;

        dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

        apic_printk(APIC_VERBOSE, KERN_DEBUG
                    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
                    "IRQ %d Mode:%i Active:%i)\n",
                    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
                    irq, trigger, polarity);

        if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
                               dest, trigger, polarity, cfg->vector, pin)) {
                printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
                       mp_ioapics[apic_id].apicid, pin);
                __clear_irq_vector(irq, cfg);
                return;
        }

        ioapic_register_intr(irq, trigger);
        if (irq < legacy_pic->nr_legacy_irqs)
                legacy_pic->mask(irq);

        ioapic_write_entry(apic_id, pin, entry);
}

static struct {
        DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];

static void __init setup_IO_APIC_irqs(void)
{
        int apic_id, pin, idx, irq, notcon = 0;
        int node = cpu_to_node(0);
        struct irq_cfg *cfg;

        apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

        for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
        for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
                idx = find_irq_entry(apic_id, pin, mp_INT);
                if (idx == -1) {
                        if (!notcon) {
                                notcon = 1;
                                apic_printk(APIC_VERBOSE,
                                        KERN_DEBUG " %d-%d",
                                        mp_ioapics[apic_id].apicid, pin);
                        } else
                                apic_printk(APIC_VERBOSE, " %d-%d",
                                        mp_ioapics[apic_id].apicid, pin);
                        continue;
                }
                if (notcon) {
                        apic_printk(APIC_VERBOSE,
                                " (apicid-pin) not connected\n");
                        notcon = 0;
                }

                irq = pin_2_irq(idx, apic_id, pin);

                if ((apic_id > 0) && (irq > 16))
                        continue;

                /*
                 * Skip the timer IRQ if there's a quirk handler
                 * installed and if it returns 1:
                 */
                if (apic->multi_timer_check &&
                    apic->multi_timer_check(apic_id, irq))
                        continue;

                cfg = alloc_irq_and_cfg_at(irq, node);
                if (!cfg)
                        continue;

                add_pin_to_irq_node(cfg, node, apic_id, pin);
                /*
                 * don't mark it in pin_programmed, so later acpi could
                 * set it correctly when irq < 16
                 */
                setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx),
                                 irq_polarity(idx));
        }

        if (notcon)
                apic_printk(APIC_VERBOSE,
                        " (apicid-pin) not connected\n");
}

/*
 * for the gsi that is not in the first ioapic
 * but could not use acpi_register_gsi()
 * like some special sci in IBM x3330
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
        int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
        struct irq_cfg *cfg;

        /*
         * Convert 'gsi' to 'ioapic.pin'.
         */
        apic_id = mp_find_ioapic(gsi);
        if (apic_id < 0)
                return;

        pin = mp_find_ioapic_pin(apic_id, gsi);
        idx = find_irq_entry(apic_id, pin, mp_INT);
        if (idx == -1)
                return;

        irq = pin_2_irq(idx, apic_id, pin);

        /* Only handle the non legacy irqs on secondary ioapics */
        if (apic_id == 0 || irq < NR_IRQS_LEGACY)
                return;

        cfg = alloc_irq_and_cfg_at(irq, node);
        if (!cfg)
                return;

        add_pin_to_irq_node(cfg, node, apic_id, pin);

        if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
                pr_debug("Pin %d-%d already programmed\n",
                         mp_ioapics[apic_id].apicid, pin);
                return;
        }
        set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);

        setup_ioapic_irq(apic_id, pin, irq, cfg,
                         irq_trigger(idx), irq_polarity(idx));
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
                                        int vector)
{
        struct IO_APIC_route_entry entry;

        if (intr_remapping_enabled)
                return;

        memset(&entry, 0, sizeof(entry));

        /*
         * We use logical delivery to get the timer IRQ
         * to the first CPU.
         */
        entry.dest_mode = apic->irq_dest_mode;
        entry.mask = 0;                 /* don't mask IRQ for edge */
        entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
        entry.delivery_mode = apic->irq_delivery_mode;
        entry.polarity = 0;
        entry.trigger = 0;
        entry.vector = vector;

        /*
         * The timer IRQ doesn't have to know that behind the
         * scene we may have an 8259A-master in AEOI mode ...
         */
        set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

        /*
         * Add it to the IO-APIC irq-routing table:
         */
        ioapic_write_entry(apic_id, pin, entry);
}
__apicdebuginit(void) print_IO_APIC(void)
{
        int apic, i;
        union IO_APIC_reg_00 reg_00;
        union IO_APIC_reg_01 reg_01;
        union IO_APIC_reg_02 reg_02;
        union IO_APIC_reg_03 reg_03;
        unsigned long flags;
        struct irq_cfg *cfg;
        struct irq_desc *desc;
        unsigned int irq;

        printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
        for (i = 0; i < nr_ioapics; i++)
                printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
                       mp_ioapics[i].apicid, nr_ioapic_registers[i]);

        /*
         * We are a bit conservative about what we expect. We have to
         * know about every hardware change ASAP.
         */
        printk(KERN_INFO "testing the IO APIC.......................\n");

        for (apic = 0; apic < nr_ioapics; apic++) {

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        reg_01.raw = io_apic_read(apic, 1);
        if (reg_01.bits.version >= 0x10)
                reg_02.raw = io_apic_read(apic, 2);
        if (reg_01.bits.version >= 0x20)
                reg_03.raw = io_apic_read(apic, 3);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);

        printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
        printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
        printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
        printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
        printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);

        printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
        printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
        printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
        printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);

        /*
         * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
         * but the value of reg_02 is read as the previous read register
         * value, so ignore it if reg_02 == reg_01.
         */
        if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
                printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
                printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
        }

        /*
         * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
         * or reg_03, but the value of reg_0[23] is read as the previous read
         * register value, so ignore it if reg_03 == reg_0[12].
         */
        if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
            reg_03.raw != reg_01.raw) {
                printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
                printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
        }

        printk(KERN_DEBUG ".... IRQ redirection table:\n");

        printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
                          " Stat Dmod Deli Vect:\n");

        for (i = 0; i <= reg_01.bits.entries; i++) {
                struct IO_APIC_route_entry entry;

                entry = ioapic_read_entry(apic, i);

                printk(KERN_DEBUG " %02x %03X ",
                        i,
                        entry.dest);

                printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
                        entry.mask,
                        entry.trigger,
                        entry.irr,
                        entry.polarity,
                        entry.delivery_status,
                        entry.dest_mode,
                        entry.delivery_mode,
                        entry.vector);
        }
        }
        printk(KERN_DEBUG "IRQ to pin mappings:\n");
        for_each_irq_desc(irq, desc) {
                struct irq_pin_list *entry;

                cfg = get_irq_desc_chip_data(desc);
                if (!cfg)
                        continue;
                entry = cfg->irq_2_pin;
                if (!entry)
                        continue;
                printk(KERN_DEBUG "IRQ%d ", irq);
                for_each_irq_pin(entry, cfg->irq_2_pin)
                        printk("-> %d:%d", entry->apic, entry->pin);
                printk("\n");
        }

        printk(KERN_INFO ".................................... done.\n");

        return;
}

__apicdebuginit(void) print_APIC_field(int base)
{
        int i;

        printk(KERN_DEBUG);

        for (i = 0; i < 8; i++)
                printk(KERN_CONT "%08x", apic_read(base + i*0x10));

        printk(KERN_CONT "\n");
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
        unsigned int i, v, ver, maxlvt;
        u64 icr;

        printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
                smp_processor_id(), hard_smp_processor_id());
        v = apic_read(APIC_ID);
        printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
        v = apic_read(APIC_LVR);
        printk(KERN_INFO "... APIC VERSION: %08x\n", v);
        ver = GET_APIC_VERSION(v);
        maxlvt = lapic_get_maxlvt();

        v = apic_read(APIC_TASKPRI);
        printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

        if (APIC_INTEGRATED(ver)) {     /* !82489DX */
                if (!APIC_XAPIC(ver)) {
                        v = apic_read(APIC_ARBPRI);
                        printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
                               v & APIC_ARBPRI_MASK);
                }
                v = apic_read(APIC_PROCPRI);
                printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
        }

        /*
         * Remote read supported only in the 82489DX and local APIC for
         * Pentium processors.
         */
        if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
                v = apic_read(APIC_RRR);
                printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
        }

        v = apic_read(APIC_LDR);
        printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
        if (!x2apic_enabled()) {
                v = apic_read(APIC_DFR);
                printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
        }
        v = apic_read(APIC_SPIV);
        printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

        printk(KERN_DEBUG "... APIC ISR field:\n");
        print_APIC_field(APIC_ISR);
        printk(KERN_DEBUG "... APIC TMR field:\n");
        print_APIC_field(APIC_TMR);
        printk(KERN_DEBUG "... APIC IRR field:\n");
        print_APIC_field(APIC_IRR);

        if (APIC_INTEGRATED(ver)) {     /* !82489DX */
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);

                v = apic_read(APIC_ESR);
                printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
        }

        icr = apic_icr_read();
        printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
        printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

        v = apic_read(APIC_LVTT);
        printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

        if (maxlvt > 3) { /* PC is LVT#4. */
                v = apic_read(APIC_LVTPC);
                printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
        }
        v = apic_read(APIC_LVT0);
        printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
        v = apic_read(APIC_LVT1);
        printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

        if (maxlvt > 2) { /* ERR is LVT#3. */
                v = apic_read(APIC_LVTERR);
                printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
        }

        v = apic_read(APIC_TMICT);
        printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
        v = apic_read(APIC_TMCCT);
        printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
        v = apic_read(APIC_TDCR);
        printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

        if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
                v = apic_read(APIC_EFEAT);
                maxlvt = (v >> 16) & 0xff;
                printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
                v = apic_read(APIC_ECTRL);
                printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
                for (i = 0; i < maxlvt; i++) {
                        v = apic_read(APIC_EILVTn(i));
                        printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
                }
        }
        printk("\n");
}

__apicdebuginit(void) print_local_APICs(int maxcpu)
{
        int cpu;

        if (!maxcpu)
                return;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu >= maxcpu)
                        break;
                smp_call_function_single(cpu, print_local_APIC, NULL, 1);
        }
        preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
        unsigned int v;
        unsigned long flags;

        if (!legacy_pic->nr_legacy_irqs)
                return;

        printk(KERN_DEBUG "\nprinting PIC contents\n");

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        v = inb(0xa1) << 8 | inb(0x21);
        printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

        v = inb(0xa0) << 8 | inb(0x20);
        printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

        outb(0x0b, 0xa0);
        outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
        outb(0x0a, 0xa0);
        outb(0x0a, 0x20);

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);

        printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

        v = inb(0x4d1) << 8 | inb(0x4d0);
        printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
        int num = -1;

        if (strcmp(arg, "all") == 0) {
                show_lapic = CONFIG_NR_CPUS;
        } else {
                get_option(&arg, &num);
                if (num >= 0)
                        show_lapic = num;
        }

        return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
{
        if (apic_verbosity == APIC_QUIET)
                return 0;

        print_PIC();

        /* don't print out if apic is not there */
        if (!cpu_has_apic && !apic_from_smp_config())
                return 0;

        print_local_APICs(show_lapic);
        print_IO_APIC();

        return 0;
}

fs_initcall(print_ICs);
/* Where if anywhere is the i8259 connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
        int i8259_apic, i8259_pin;
        int apic;

        if (!legacy_pic->nr_legacy_irqs)
                return;

        for(apic = 0; apic < nr_ioapics; apic++) {
                int pin;
                /* See if any of the pins is in ExtINT mode */
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
                        struct IO_APIC_route_entry entry;
                        entry = ioapic_read_entry(apic, pin);

                        /* If the interrupt line is enabled and in ExtInt mode
                         * I have found the pin where the i8259 is connected.
                         */
                        if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
                                ioapic_i8259.apic = apic;
                                ioapic_i8259.pin  = pin;
                                goto found_i8259;
                        }
                }
        }
 found_i8259:
        /* Look to see if the MP table has reported the ExtINT */
        /* If we could not find the appropriate pin by looking at the ioapic
         * the i8259 probably is not connected to the ioapic but give the
         * mptable a chance anyway.
         */
        i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
        i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
        /* Trust the MP table if nothing is set up in the hardware */
        if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
                printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
                ioapic_i8259.pin  = i8259_pin;
                ioapic_i8259.apic = i8259_apic;
        }
        /* Complain if the MP table and the hardware disagree */
        if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
            (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
        {
                printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
        }

        /*
         * Do not trust the IO-APIC being empty at bootup
         */
        clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
        /*
         * Clear the IO-APIC before rebooting:
         */
        clear_IO_APIC();

        if (!legacy_pic->nr_legacy_irqs)
                return;

        /*
         * If the i8259 is routed through an IOAPIC,
         * put that IOAPIC in virtual wire mode
         * so legacy interrupts can be delivered.
         *
         * With interrupt-remapping, for now we will use virtual wire A mode,
         * as virtual wire B is a little complex (need to configure both the
         * IOAPIC RTE as well as the interrupt-remapping table entry).
         * As this gets called during crash dump, keep this simple for now.
         */
        if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
                struct IO_APIC_route_entry entry;

                memset(&entry, 0, sizeof(entry));
                entry.mask            = 0; /* Enabled */
                entry.trigger         = 0; /* Edge */
                entry.irr             = 0;
                entry.polarity        = 0; /* High */
                entry.delivery_status = 0;
                entry.dest_mode       = 0; /* Physical */
                entry.delivery_mode   = dest_ExtINT; /* ExtInt */
                entry.vector          = 0;
                entry.dest            = read_apic_id();

                /*
                 * Add it to the IO-APIC irq-routing table:
                 */
                ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
        }

        /*
         * Use virtual wire A mode when interrupt remapping is enabled.
         */
        if (cpu_has_apic || apic_from_smp_config())
                disconnect_bsp_APIC(!intr_remapping_enabled &&
                                ioapic_i8259.pin != -1);
}
#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
void __init setup_ioapic_ids_from_mpc(void)
{
        union IO_APIC_reg_00 reg_00;
        physid_mask_t phys_id_present_map;
        physid_mask_t tmp;
        int apic_id;
        int i;
        unsigned char old_id;
        unsigned long flags;

        /*
         * Don't check I/O APIC IDs for xAPIC systems. They have
         * no meaning without the serial APIC bus.
         */
        if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
                return;
        /*
         * This is broken; anything with a real cpu count has to
         * circumvent this idiocy regardless.
         */
        apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

        /*
         * Set the IOAPIC ID to the value stored in the MPC table.
         */
        for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

                /* Read the register 0 value */
                raw_spin_lock_irqsave(&ioapic_lock, flags);
                reg_00.raw = io_apic_read(apic_id, 0);
                raw_spin_unlock_irqrestore(&ioapic_lock, flags);

                old_id = mp_ioapics[apic_id].apicid;

                if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
                        printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
                                apic_id, mp_ioapics[apic_id].apicid);
                        printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                                reg_00.bits.ID);
                        mp_ioapics[apic_id].apicid = reg_00.bits.ID;
                }

                /*
                 * Sanity check, is the ID really free? Every APIC in a
                 * system must have a unique ID or we get lots of nice
                 * 'stuck on smp_invalidate_needed IPI wait' messages.
                 */
                if (apic->check_apicid_used(&phys_id_present_map,
                                            mp_ioapics[apic_id].apicid)) {
                        printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
                                apic_id, mp_ioapics[apic_id].apicid);
                        for (i = 0; i < get_physical_broadcast(); i++)
                                if (!physid_isset(i, phys_id_present_map))
                                        break;
                        if (i >= get_physical_broadcast())
                                panic("Max APIC ID exceeded!\n");
                        printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                                i);
                        physid_set(i, phys_id_present_map);
                        mp_ioapics[apic_id].apicid = i;
                } else {
                        apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
                        apic_printk(APIC_VERBOSE, "Setting %d in the "
                                        "phys_id_present_map\n",
                                        mp_ioapics[apic_id].apicid);
                        physids_or(phys_id_present_map, phys_id_present_map, tmp);
                }

                /*
                 * We need to adjust the IRQ routing table
                 * if the ID changed.
                 */
                if (old_id != mp_ioapics[apic_id].apicid)
                        for (i = 0; i < mp_irq_entries; i++)
                                if (mp_irqs[i].dstapic == old_id)
                                        mp_irqs[i].dstapic
                                                = mp_ioapics[apic_id].apicid;

                /*
                 * Read the right value from the MPC table and
                 * write it into the ID register.
                 */
                apic_printk(APIC_VERBOSE, KERN_INFO
                        "...changing IO-APIC physical APIC ID to %d ...",
                        mp_ioapics[apic_id].apicid);

                reg_00.bits.ID = mp_ioapics[apic_id].apicid;
                raw_spin_lock_irqsave(&ioapic_lock, flags);
                io_apic_write(apic_id, 0, reg_00.raw);
                raw_spin_unlock_irqrestore(&ioapic_lock, flags);

                /*
                 * Sanity check
                 */
                raw_spin_lock_irqsave(&ioapic_lock, flags);
                reg_00.raw = io_apic_read(apic_id, 0);
                raw_spin_unlock_irqrestore(&ioapic_lock, flags);
                if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
                        printk("could not set ID!\n");
                else
                        apic_printk(APIC_VERBOSE, " ok.\n");
        }
}
#endif

int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
        no_timer_check = 1;
        return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *      - timer IRQ defaults to IO-APIC IRQ
 *      - if this function detects that timer IRQs are defunct, then we fall
 *        back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
        unsigned long t1 = jiffies;
        unsigned long flags;

        if (no_timer_check)
                return 1;

        local_save_flags(flags);
        local_irq_enable();
        /* Let ten ticks pass... */
        mdelay((10 * 1000) / HZ);
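        /*
         * (10 * 1000) / HZ milliseconds is the wall time of ten timer
         * ticks for any HZ setting (one tick lasts 1000/HZ ms), so the
         * jiffies check below can expect several ticks to have elapsed.
         */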
        local_irq_restore(flags);

        /*
         * Expect a few ticks at least, to be sure some possible
         * glue logic does not lock up after the first one or two
         * ticks in a non-ExtINT mode. Also the local APIC
         * might have cached one ExtINT interrupt. Finally, at
         * least one tick may be lost due to delays.
         */

        /* jiffies wrap? */
        if (time_after(jiffies, t1 + 4))
                return 1;
        return 0;
}

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * to return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_ioapic_irq(struct irq_data *data)
{
        int was_pending = 0, irq = data->irq;
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        if (irq < legacy_pic->nr_legacy_irqs) {
                legacy_pic->mask(irq);
                if (legacy_pic->irq_pending(irq))
                        was_pending = 1;
        }
        __unmask_ioapic(data->chip_data);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);

        return was_pending;
}

static int ioapic_retrigger_irq(struct irq_data *data)
{
        struct irq_cfg *cfg = data->chip_data;
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        return 1;
}

/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP
void send_cleanup_vector(struct irq_cfg *cfg)
{
        cpumask_var_t cleanup_mask;

        if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
                unsigned int i;
                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
                        apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
                apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                free_cpumask_var(cleanup_mask);
        }
        cfg->move_in_progress = 0;
}

static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
        int apic, pin;
        struct irq_pin_list *entry;
        u8 vector = cfg->vector;

        for_each_irq_pin(entry, cfg->irq_2_pin) {
                unsigned int reg;

                apic = entry->apic;
                pin = entry->pin;
                /*
                 * With interrupt-remapping, destination information comes
                 * from interrupt-remapping table entry.
                 */
                if (!irq_remapped(cfg))
                        io_apic_write(apic, 0x11 + pin*2, dest);
                reg = io_apic_read(apic, 0x10 + pin*2);
                reg &= ~IO_APIC_REDIR_VECTOR_MASK;
                reg |= vector;
                io_apic_modify(apic, 0x10 + pin*2, reg);
        }
}

/*
 * Either sets data->affinity to a valid value, and returns
 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
 * leaves data->affinity untouched.
 */
int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
                          unsigned int *dest_id)
{
        struct irq_cfg *cfg = data->chip_data;

        if (!cpumask_intersects(mask, cpu_online_mask))
                return -1;

        if (assign_irq_vector(data->irq, data->chip_data, mask))
                return -1;

        cpumask_copy(data->affinity, mask);

        *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
        return 0;
}

static int
ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
                    bool force)
{
        unsigned int dest, irq = data->irq;
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        ret = __ioapic_set_affinity(data, mask, &dest);
        if (!ret) {
                /* Only the high 8 bits are valid. */
                dest = SET_APIC_LOGICAL_ID(dest);
                __target_IO_APIC_irq(irq, dest, data->chip_data);
        }
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
        return ret;
}
#ifdef CONFIG_INTR_REMAP

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * Real vector that is used for interrupting cpu will be coming from
 * the interrupt-remapping table entry.
 */
static int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
                       bool force)
{
        struct irq_cfg *cfg = data->chip_data;
        unsigned int dest, irq = data->irq;
        struct irte irte;

        if (!cpumask_intersects(mask, cpu_online_mask))
                return -EINVAL;

        if (get_irte(irq, &irte))
                return -EBUSY;

        if (assign_irq_vector(irq, cfg, mask))
                return -EBUSY;

        dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);

        irte.vector = cfg->vector;
        irte.dest_id = IRTE_DEST(dest);

        /*
         * Modify the IRTE and flush the interrupt entry cache.
         */
        modify_irte(irq, &irte);

        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        cpumask_copy(data->affinity, mask);
        return 0;
}

#else
static inline int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
                       bool force)
{
        return 0;
}
#endif

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
        unsigned vector, me;

        ack_APIC_irq();
        exit_idle();
        irq_enter();

        me = smp_processor_id();
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                unsigned int irq;
                unsigned int irr;
                struct irq_desc *desc;
                struct irq_cfg *cfg;
                irq = __get_cpu_var(vector_irq)[vector];

                if (irq == -1)
                        continue;

                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                cfg = irq_cfg(irq);
                raw_spin_lock(&desc->lock);

                /*
                 * Check if the irq migration is in progress. If so, we
                 * haven't received the cleanup request yet for this irq.
                 */
                if (cfg->move_in_progress)
                        goto unlock;

                if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                        goto unlock;

                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
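                /*
                 * The local APIC's IRR is 256 bits spread over eight
                 * 32-bit registers spaced 0x10 apart, so
                 * (vector / 32 * 0x10) selects the word holding this
                 * vector's bit; bit (vector % 32) is tested below.
                 */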
2330 * Check if the vector that needs to be cleanedup is
2331 * registered at the cpu's IRR. If so, then this is not
2332 * the best time to clean it up. Lets clean it up in the
2333 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
2336 if (irr & (1 << (vector % 32))) {
2337 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
2340 __get_cpu_var(vector_irq)[vector] = -1;
2342 raw_spin_unlock(&desc->lock);
2348 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
2352 if (likely(!cfg->move_in_progress))
2355 me = smp_processor_id();
2357 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2358 send_cleanup_vector(cfg);
2361 static void irq_complete_move(struct irq_cfg *cfg)
2363 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
2366 void irq_force_complete_move(int irq)
2368 struct irq_cfg *cfg = get_irq_chip_data(irq);
2373 __irq_complete_move(cfg, cfg->vector);
2376 static inline void irq_complete_move(struct irq_cfg *cfg) { }
2379 static void ack_apic_edge(struct irq_data *data)
2381 irq_complete_move(data->chip_data);
2382 move_native_irq(data->irq);
2386 atomic_t irq_mis_count;
2389 * IO-APIC versions below 0x20 don't support EOI register.
2390 * For the record, here is the information about various versions:
2392 * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
2393 * 2Xh I/O(x)APIC which is PCI 2.2 Compliant
2396 * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic
2397 * version as 0x2. This is an error with documentation and these ICH chips
2398 * use io-apic's of version 0x20.
2400 * For IO-APIC's with EOI register, we use that to do an explicit EOI.
2401 * Otherwise, we simulate the EOI message manually by changing the trigger
2402 * mode to edge and then back to level, with RTE being masked during this.
2404 static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2406 struct irq_pin_list *entry;
2407 unsigned long flags;
2409 raw_spin_lock_irqsave(&ioapic_lock, flags);
2410 for_each_irq_pin(entry, cfg->irq_2_pin) {
2411 if (mp_ioapics[entry->apic].apicver >= 0x20) {
2413 * Intr-remapping uses pin number as the virtual vector
2414 * in the RTE. Actual vector is programmed in
2415 * intr-remapping table entry. Hence for the io-apic
2416 * EOI we use the pin number.
2418 if (irq_remapped(cfg))
2419 io_apic_eoi(entry->apic, entry->pin);
2421 io_apic_eoi(entry->apic, cfg->vector);
2423 __mask_and_edge_IO_APIC_irq(entry);
2424 __unmask_and_level_IO_APIC_irq(entry);
2427 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2430 static void ack_apic_level(struct irq_data *data)
2432 struct irq_cfg *cfg = data->chip_data;
2433 int i, do_unmask_irq = 0, irq = data->irq;
2434 struct irq_desc *desc = irq_to_desc(irq);
2437 irq_complete_move(cfg);
2438 #ifdef CONFIG_GENERIC_PENDING_IRQ
2439 /* If we are moving the irq we need to mask it */
2440 if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2447 * It appears there is an erratum which affects at least version 0x11
2448 * of I/O APIC (that's the 82093AA and cores integrated into various
2449 * chipsets). Under certain conditions a level-triggered interrupt is
2450 * erroneously delivered as edge-triggered one but the respective IRR
2451 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2452 * message but it will never arrive and further interrupts are blocked
2453 * from the source. The exact reason is so far unknown, but the
2454 * phenomenon was observed when two consecutive interrupt requests
2455 * from a given source get delivered to the same CPU and the source is
2456 * temporarily disabled in between.
2458 * A workaround is to simulate an EOI message manually. We achieve it
2459 * by setting the trigger mode to edge and then to level when the edge
2460 * trigger mode gets detected in the TMR of a local APIC for a
2461 * level-triggered interrupt. We mask the source for the time of the
2462 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2463 * The idea is from Manfred Spraul. --macro
2465 * Also, when a cpu goes offline, fixup_irqs() forwards any
2466 * unhandled interrupt on the offlined cpu to the new cpu
2467 * destination that is handling the corresponding interrupt. This
2468 * interrupt forwarding is done via IPIs. Hence, in this case a
2469 * level-triggered io-apic interrupt will also be seen as an edge
2470 * interrupt in the IRR, and we can't rely on the cpu's EOI
2471 * being broadcast to the IO-APICs to clear the remote IRR
2472 * corresponding to the level-triggered interrupt. Hence on IO-APICs
2473 * with an EOI register we do an explicit EOI to clear the
2474 * remote IRR, and on IO-APICs without one we use the above
2475 * logic (mask+edge followed by unmask+level) from Manfred
2476 * Spraul to clear the remote IRR.
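 *
 * The apic_read() below fetches the 32-bit TMR word that holds this
 * vector: the TMR registers sit on 16-byte strides, so
 * (i & ~0x1f) >> 1 is the byte offset of word i/32, and
 * (1 << (i & 0x1f)) tested later is the vector's bit within it.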
2479 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2482 * We must acknowledge the irq before we move it or the acknowledge will
2483 * not propagate properly.
2488 * Tail end of clearing the remote IRR bit (either by delivering the EOI
2489 * message via the io-apic EOI register write or by simulating it using
2490 * the mask+edge followed by unmask+level logic) manually when the
2491 * level-triggered interrupt is seen as an edge-triggered interrupt
2494 if (!(v & (1 << (i & 0x1f)))) {
2495 atomic_inc(&irq_mis_count);
2497 eoi_ioapic_irq(irq, cfg);
2500 /* Now we can move and re-enable the irq */
2501 if (unlikely(do_unmask_irq)) {
2502 /* Only migrate the irq if the ack has been received.
2504 * On rare occasions the broadcast level-triggered ack gets
2505 * delayed going to ioapics, and if we reprogram the
2506 * vector while Remote IRR is still set the irq will never
2509 * To prevent this scenario we read the Remote IRR bit
2510 * of the ioapic. This has two effects.
2511 * - On any sane system the read of the ioapic will
2512 * flush writes (and acks) going to the ioapic from
2514 * - We get to see if the ACK has actually been delivered.
2516 * Based on failed experiments of reprogramming the
2517 * ioapic entry from outside of irq context, starting
2518 * with masking the ioapic entry and then polling until
2519 * Remote IRR was clear before reprogramming the
2520 * ioapic, I don't trust the Remote IRR bit to be
2521 * completely accurate.
2523 * However, there appears to be no other way to plug
2524 * this race, so if the Remote IRR bit is not
2525 * accurate and is causing problems then it is a hardware bug
2526 * and you can go talk to the chipset vendor about it.
2528 if (!io_apic_level_ack_pending(cfg))
2529 move_masked_irq(irq);
2534 #ifdef CONFIG_INTR_REMAP
2535 static void ir_ack_apic_edge(struct irq_data *data)
2540 static void ir_ack_apic_level(struct irq_data *data)
2543 eoi_ioapic_irq(data->irq, data->chip_data);
2545 #endif /* CONFIG_INTR_REMAP */
2547 static struct irq_chip ioapic_chip __read_mostly = {
2549 .irq_startup = startup_ioapic_irq,
2550 .irq_mask = mask_ioapic_irq,
2551 .irq_unmask = unmask_ioapic_irq,
2552 .irq_ack = ack_apic_edge,
2553 .irq_eoi = ack_apic_level,
2555 .irq_set_affinity = ioapic_set_affinity,
2557 .irq_retrigger = ioapic_retrigger_irq,
2560 static struct irq_chip ir_ioapic_chip __read_mostly = {
2561 .name = "IR-IO-APIC",
2562 .irq_startup = startup_ioapic_irq,
2563 .irq_mask = mask_ioapic_irq,
2564 .irq_unmask = unmask_ioapic_irq,
2565 #ifdef CONFIG_INTR_REMAP
2566 .irq_ack = ir_ack_apic_edge,
2567 .irq_eoi = ir_ack_apic_level,
2569 .irq_set_affinity = ir_ioapic_set_affinity,
2572 .irq_retrigger = ioapic_retrigger_irq,
2575 static inline void init_IO_APIC_traps(void)
2578 struct irq_desc *desc;
2579 struct irq_cfg *cfg;
2582 * NOTE! The local APIC isn't very good at handling
2583 * multiple interrupts at the same interrupt level.
2584 * As the interrupt level is determined by taking the
2585 * vector number and shifting that right by 4, we
2586 * want to spread these out a bit so that they don't
2587 * all fall in the same interrupt level.
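 * (The level is vector >> 4, so e.g. vectors 0x40-0x4f all share
 * level 4; sixteen vectors per level.)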
2589 * Also, we've got to be careful not to trash gate
2590 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2592 for_each_irq_desc(irq, desc) {
2593 cfg = get_irq_desc_chip_data(desc);
2594 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2596 * Hmm.. We don't have an entry for this,
2597 * so default to an old-fashioned 8259
2598 * interrupt if we can..
2600 if (irq < legacy_pic->nr_legacy_irqs)
2601 legacy_pic->make_irq(irq);
2603 /* Strange. Oh, well.. */
2604 desc->chip = &no_irq_chip;
2610 * The local APIC irq-chip implementation:
2613 static void mask_lapic_irq(struct irq_data *data)
2617 v = apic_read(APIC_LVT0);
2618 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2621 static void unmask_lapic_irq(struct irq_data *data)
2625 v = apic_read(APIC_LVT0);
2626 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2629 static void ack_lapic_irq(struct irq_data *data)
2634 static struct irq_chip lapic_chip __read_mostly = {
2635 .name = "local-APIC",
2636 .irq_mask = mask_lapic_irq,
2637 .irq_unmask = unmask_lapic_irq,
2638 .irq_ack = ack_lapic_irq,
2641 static void lapic_register_intr(int irq)
2643 irq_clear_status_flags(irq, IRQ_LEVEL);
2644 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2648 static void __init setup_nmi(void)
2651 * Dirty trick to enable the NMI watchdog ...
2652 * We put the 8259A master into AEOI mode and
2653 * unmask on all local APICs LVT0 as NMI.
2655 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2656 * is from Maciej W. Rozycki - so we do not have to EOI from
2657 * the NMI handler or the timer interrupt.
2659 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2661 enable_NMI_through_LVT0();
2663 apic_printk(APIC_VERBOSE, " done.\n");
2667 * This looks a bit hackish but it's about the only way of sending
2668 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2669 * not support the ExtINT mode, unfortunately. We need to send these
2670 * cycles as some i82489DX-based boards have glue logic that keeps the
2671 * 8259A interrupt line asserted until INTA. --macro
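 *
 * In outline, the code below: saves IRQ8's RTE, installs a temporary
 * ExtINT entry targeting the boot CPU, briefly enables the RTC
 * periodic interrupt so the 8259A output gets sampled, then restores
 * the original RTE and RTC state.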
2673 static inline void __init unlock_ExtINT_logic(void)
2676 struct IO_APIC_route_entry entry0, entry1;
2677 unsigned char save_control, save_freq_select;
2679 pin = find_isa_irq_pin(8, mp_INT);
2684 apic = find_isa_irq_apic(8, mp_INT);
2690 entry0 = ioapic_read_entry(apic, pin);
2691 clear_IO_APIC_pin(apic, pin);
2693 memset(&entry1, 0, sizeof(entry1));
2695 entry1.dest_mode = 0; /* physical delivery */
2696 entry1.mask = 0; /* unmask IRQ now */
2697 entry1.dest = hard_smp_processor_id();
2698 entry1.delivery_mode = dest_ExtINT;
2699 entry1.polarity = entry0.polarity;
2703 ioapic_write_entry(apic, pin, entry1);
2705 save_control = CMOS_READ(RTC_CONTROL);
2706 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2707 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2709 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
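	/*
	 * Rate select 0x6 yields a 1024 Hz periodic tick and RTC_PIE
	 * enables it, so IRQ8 should fire almost immediately; the
	 * bounded poll below watches for the periodic-interrupt flag
	 * (RTC_PF).
	 */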
2714 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2718 CMOS_WRITE(save_control, RTC_CONTROL);
2719 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2720 clear_IO_APIC_pin(apic, pin);
2722 ioapic_write_entry(apic, pin, entry0);
2725 static int disable_timer_pin_1 __initdata;
2726 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2727 static int __init disable_timer_pin_setup(char *arg)
2729 disable_timer_pin_1 = 1;
2732 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2734 int timer_through_8259 __initdata;
2737 * This code may look a bit paranoid, but it's supposed to cooperate with
2738 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2739 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2740 * fanatically on his truly buggy board.
2742 * FIXME: really need to revamp this for all platforms.
2744 static inline void __init check_timer(void)
2746 struct irq_cfg *cfg = get_irq_chip_data(0);
2747 int node = cpu_to_node(0);
2748 int apic1, pin1, apic2, pin2;
2749 unsigned long flags;
2752 local_irq_save(flags);
2755 * get/set the timer IRQ vector:
2757 legacy_pic->mask(0);
2758 assign_irq_vector(0, cfg, apic->target_cpus());
2761 * As IRQ0 is to be enabled in the 8259A, the virtual
2762 * wire has to be disabled in the local APIC. Also
2763 * timer interrupts need to be acknowledged manually in
2764 * the 8259A for the i82489DX when using the NMI
2765 * watchdog as that APIC treats NMIs as level-triggered.
2766 * The AEOI mode will finish them in the 8259A automatically.
2769 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2770 legacy_pic->init(1);
2771 #ifdef CONFIG_X86_32
2775 ver = apic_read(APIC_LVR);
2776 ver = GET_APIC_VERSION(ver);
2777 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2781 pin1 = find_isa_irq_pin(0, mp_INT);
2782 apic1 = find_isa_irq_apic(0, mp_INT);
2783 pin2 = ioapic_i8259.pin;
2784 apic2 = ioapic_i8259.apic;
2786 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2787 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2788 cfg->vector, apic1, pin1, apic2, pin2);
2791 * Some BIOS writers are clueless and report the ExtINTA
2792 * I/O APIC input from the cascaded 8259A as the timer
2793 * interrupt input. So just in case, if only one pin
2794 * was found above, try it both directly and through the 8259A.
2798 if (intr_remapping_enabled)
2799 panic("BIOS bug: timer not connected to IO-APIC");
2803 } else if (pin2 == -1) {
2810 * Ok, does IRQ0 through the IOAPIC work?
2813 add_pin_to_irq_node(cfg, node, apic1, pin1);
2814 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2816 /* For an edge-triggered timer, setup_ioapic_irq() already
2817 * leaves it unmasked, so we only need to unmask it here
2818 * if it is level-triggered.
2819 * Do we really have a level-triggered timer?
2822 idx = find_irq_entry(apic1, pin1, mp_INT);
2823 if (idx != -1 && irq_trigger(idx))
2826 if (timer_irq_works()) {
2827 if (nmi_watchdog == NMI_IO_APIC) {
2829 legacy_pic->unmask(0);
2831 if (disable_timer_pin_1 > 0)
2832 clear_IO_APIC_pin(0, pin1);
2835 if (intr_remapping_enabled)
2836 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2837 local_irq_disable();
2838 clear_IO_APIC_pin(apic1, pin1);
2840 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2841 "8254 timer not connected to IO-APIC\n");
2843 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2844 "(IRQ0) through the 8259A ...\n");
2845 apic_printk(APIC_QUIET, KERN_INFO
2846 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2848 * legacy devices should be connected to IO APIC #0
2850 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
2851 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2852 legacy_pic->unmask(0);
2853 if (timer_irq_works()) {
2854 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2855 timer_through_8259 = 1;
2856 if (nmi_watchdog == NMI_IO_APIC) {
2857 legacy_pic->mask(0);
2859 legacy_pic->unmask(0);
2864 * Cleanup, just in case ...
2866 local_irq_disable();
2867 legacy_pic->mask(0);
2868 clear_IO_APIC_pin(apic2, pin2);
2869 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2872 if (nmi_watchdog == NMI_IO_APIC) {
2873 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2874 "through the IO-APIC - disabling NMI Watchdog!\n");
2875 nmi_watchdog = NMI_NONE;
2877 #ifdef CONFIG_X86_32
2881 apic_printk(APIC_QUIET, KERN_INFO
2882 "...trying to set up timer as Virtual Wire IRQ...\n");
2884 lapic_register_intr(0);
2885 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2886 legacy_pic->unmask(0);
2888 if (timer_irq_works()) {
2889 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2892 local_irq_disable();
2893 legacy_pic->mask(0);
2894 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2895 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2897 apic_printk(APIC_QUIET, KERN_INFO
2898 "...trying to set up timer as ExtINT IRQ...\n");
2900 legacy_pic->init(0);
2901 legacy_pic->make_irq(0);
2902 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2904 unlock_ExtINT_logic();
2906 if (timer_irq_works()) {
2907 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2910 local_irq_disable();
2911 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2912 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2913 "report. Then try booting with the 'noapic' option.\n");
2915 local_irq_restore(flags);
2919 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2920 * to devices. However there may be an I/O APIC pin available for
2921 * this interrupt regardless. The pin may be left unconnected, but
2922 * typically it will be reused as an ExtINT cascade interrupt for
2923 * the master 8259A. In the MPS case such a pin will normally be
2924 * reported as an ExtINT interrupt in the MP table. With ACPI
2925 * there is no provision for ExtINT interrupts, and in the absence
2926 * of an override it would be treated as an ordinary ISA I/O APIC
2927 * interrupt, that is edge-triggered and unmasked by default. We
2928 * used to do this, but it caused problems on some systems because
2929 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2930 * the same ExtINT cascade interrupt to drive the local APIC of the
2931 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2932 * the I/O APIC in all cases now. No actual device should request
2933 * it anyway. --macro
2935 #define PIC_IRQS (1UL << PIC_CASCADE_IR)
2937 void __init setup_IO_APIC(void)
2941 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
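 *
 * With a legacy PIC present, the assignment below routes every IRQ
 * except the 8259A cascade (IRQ2, see PIC_IRQS above) through the
 * IO-APIC; without one, all IRQs are routed through it.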
2943 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
2945 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2947 * Set up IO-APIC IRQ routing.
2949 x86_init.mpparse.setup_ioapic_ids();
2952 setup_IO_APIC_irqs();
2953 init_IO_APIC_traps();
2954 if (legacy_pic->nr_legacy_irqs)
2959 * Called after all the initialization is done. If we didn't find any
2960 * APIC bugs then we can allow the modify fast path
2963 static int __init io_apic_bug_finalize(void)
2965 if (sis_apic_bug == -1)
2970 late_initcall(io_apic_bug_finalize);
2972 struct sysfs_ioapic_data {
2973 struct sys_device dev;
2974 struct IO_APIC_route_entry entry[0];
2976 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
2978 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2980 struct IO_APIC_route_entry *entry;
2981 struct sysfs_ioapic_data *data;
2984 data = container_of(dev, struct sysfs_ioapic_data, dev);
2985 entry = data->entry;
2986 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
2987 *entry = ioapic_read_entry(dev->id, i);
2992 static int ioapic_resume(struct sys_device *dev)
2994 struct IO_APIC_route_entry *entry;
2995 struct sysfs_ioapic_data *data;
2996 unsigned long flags;
2997 union IO_APIC_reg_00 reg_00;
3000 data = container_of(dev, struct sysfs_ioapic_data, dev);
3001 entry = data->entry;
3003 raw_spin_lock_irqsave(&ioapic_lock, flags);
3004 reg_00.raw = io_apic_read(dev->id, 0);
3005 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3006 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3007 io_apic_write(dev->id, 0, reg_00.raw);
3009 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3010 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3011 ioapic_write_entry(dev->id, i, entry[i]);
3016 static struct sysdev_class ioapic_sysdev_class = {
3018 .suspend = ioapic_suspend,
3019 .resume = ioapic_resume,
3022 static int __init ioapic_init_sysfs(void)
3024 struct sys_device *dev;
3027 error = sysdev_class_register(&ioapic_sysdev_class);
3031 for (i = 0; i < nr_ioapics; i++) {
3032 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
3033 * sizeof(struct IO_APIC_route_entry);
3034 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
3035 if (!mp_ioapic_data[i]) {
3036 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3039 dev = &mp_ioapic_data[i]->dev;
3041 dev->cls = &ioapic_sysdev_class;
3042 error = sysdev_register(dev);
3044 kfree(mp_ioapic_data[i]);
3045 mp_ioapic_data[i] = NULL;
3046 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3054 device_initcall(ioapic_init_sysfs);
3057 * Dynamic irq allocation and deallocation
3059 unsigned int create_irq_nr(unsigned int from, int node)
3061 struct irq_cfg *cfg;
3062 unsigned long flags;
3063 unsigned int ret = 0;
3066 if (from < nr_irqs_gsi)
3069 irq = alloc_irq_from(from, node);
3072 cfg = alloc_irq_cfg(irq, node);
3074 free_irq_at(irq, NULL);
3078 raw_spin_lock_irqsave(&vector_lock, flags);
3079 if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
3081 raw_spin_unlock_irqrestore(&vector_lock, flags);
3084 set_irq_chip_data(irq, cfg);
3085 irq_clear_status_flags(irq, IRQ_NOREQUEST);
3087 free_irq_at(irq, cfg);
3092 int create_irq(void)
3094 int node = cpu_to_node(0);
3095 unsigned int irq_want;
3098 irq_want = nr_irqs_gsi;
3099 irq = create_irq_nr(irq_want, node);
3107 void destroy_irq(unsigned int irq)
3109 struct irq_cfg *cfg = get_irq_chip_data(irq);
3110 unsigned long flags;
3112 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
3115 raw_spin_lock_irqsave(&vector_lock, flags);
3116 __clear_irq_vector(irq, cfg);
3117 raw_spin_unlock_irqrestore(&vector_lock, flags);
3118 free_irq_at(irq, cfg);
3122 * MSI message composition
3124 #ifdef CONFIG_PCI_MSI
3125 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3126 struct msi_msg *msg, u8 hpet_id)
3128 struct irq_cfg *cfg;
3136 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3140 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3142 if (irq_remapped(get_irq_chip_data(irq))) {
3147 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3148 BUG_ON(ir_index == -1);
3150 prepare_irte(&irte, cfg->vector, dest);
3152 /* Set source-id of interrupt request */
3154 set_msi_sid(&irte, pdev);
3156 set_hpet_sid(&irte, hpet_id);
3158 modify_irte(irq, &irte);
3160 msg->address_hi = MSI_ADDR_BASE_HI;
3161 msg->data = sub_handle;
3162 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3164 MSI_ADDR_IR_INDEX1(ir_index) |
3165 MSI_ADDR_IR_INDEX2(ir_index);
3167 if (x2apic_enabled())
3168 msg->address_hi = MSI_ADDR_BASE_HI |
3169 MSI_ADDR_EXT_DEST_ID(dest);
3171 msg->address_hi = MSI_ADDR_BASE_HI;
3175 ((apic->irq_dest_mode == 0) ?
3176 MSI_ADDR_DEST_MODE_PHYSICAL:
3177 MSI_ADDR_DEST_MODE_LOGICAL) |
3178 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3179 MSI_ADDR_REDIRECTION_CPU:
3180 MSI_ADDR_REDIRECTION_LOWPRI) |
3181 MSI_ADDR_DEST_ID(dest);
3184 MSI_DATA_TRIGGER_EDGE |
3185 MSI_DATA_LEVEL_ASSERT |
3186 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3187 MSI_DATA_DELIVERY_FIXED:
3188 MSI_DATA_DELIVERY_LOWPRI) |
3189 MSI_DATA_VECTOR(cfg->vector);
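
	/*
	 * Rough shape of the result (xAPIC MSI format): address_lo is
	 * the 0xFEExxxxx doorbell with the destination APIC ID in bits
	 * 19:12 plus the dest-mode/redirection hints, and data carries
	 * the delivery mode and vector.  A sketch, not a full field map.
	 */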
3196 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3198 struct irq_cfg *cfg = data->chip_data;
3202 if (__ioapic_set_affinity(data, mask, &dest))
3205 __get_cached_msi_msg(data->msi_desc, &msg);
3207 msg.data &= ~MSI_DATA_VECTOR_MASK;
3208 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3209 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3210 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3212 __write_msi_msg(data->msi_desc, &msg);
3216 #ifdef CONFIG_INTR_REMAP
3218 * Migrate the MSI irq to another cpumask. This migration is
3219 * done in process context using interrupt-remapping hardware.
3222 ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
3225 struct irq_cfg *cfg = data->chip_data;
3226 unsigned int dest, irq = data->irq;
3229 if (get_irte(irq, &irte))
3232 if (__ioapic_set_affinity(data, mask, &dest))
3235 irte.vector = cfg->vector;
3236 irte.dest_id = IRTE_DEST(dest);
3239 * atomically update the IRTE with the new destination and vector.
3241 modify_irte(irq, &irte);
3244 * After this point, all the interrupts will start arriving
3245 * at the new destination. So, time to cleanup the previous
3246 * vector allocation.
3248 if (cfg->move_in_progress)
3249 send_cleanup_vector(cfg);
3255 #endif /* CONFIG_SMP */
3258 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3259 * which implement the MSI or MSI-X Capability Structure.
3261 static struct irq_chip msi_chip = {
3263 .irq_unmask = unmask_msi_irq,
3264 .irq_mask = mask_msi_irq,
3265 .irq_ack = ack_apic_edge,
3267 .irq_set_affinity = msi_set_affinity,
3269 .irq_retrigger = ioapic_retrigger_irq,
3272 static struct irq_chip msi_ir_chip = {
3273 .name = "IR-PCI-MSI",
3274 .irq_unmask = unmask_msi_irq,
3275 .irq_mask = mask_msi_irq,
3276 #ifdef CONFIG_INTR_REMAP
3277 .irq_ack = ir_ack_apic_edge,
3279 .irq_set_affinity = ir_msi_set_affinity,
3282 .irq_retrigger = ioapic_retrigger_irq,
3286 * Map the PCI dev to the corresponding remapping hardware unit
3287 * and allocate 'nvec' consecutive interrupt-remapping table entries
3290 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3292 struct intel_iommu *iommu;
3295 iommu = map_dev_to_ir(dev);
3298 "Unable to map PCI %s to iommu\n", pci_name(dev));
3302 index = alloc_irte(iommu, irq, nvec);
3305 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3312 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3317 ret = msi_compose_msg(dev, irq, &msg, -1);
3321 set_irq_msi(irq, msidesc);
3322 write_msi_msg(irq, &msg);
3324 if (irq_remapped(get_irq_chip_data(irq))) {
3325 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3326 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3328 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3330 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
3335 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3337 int node, ret, sub_handle, index = 0;
3338 unsigned int irq, irq_want;
3339 struct msi_desc *msidesc;
3340 struct intel_iommu *iommu = NULL;
3342 /* x86 doesn't support multiple MSI yet */
3343 if (type == PCI_CAP_ID_MSI && nvec > 1)
3346 node = dev_to_node(&dev->dev);
3347 irq_want = nr_irqs_gsi;
3349 list_for_each_entry(msidesc, &dev->msi_list, list) {
3350 irq = create_irq_nr(irq_want, node);
3354 if (!intr_remapping_enabled)
3359 * allocate a consecutive block of IRTEs
3362 index = msi_alloc_irte(dev, irq, nvec);
3368 iommu = map_dev_to_ir(dev);
3374 * set up the mapping between the irq and the IRTE
3375 * base index, with the sub_handle pointing to the
3376 * appropriate interrupt remap table entry.
3378 set_irte_irq(irq, iommu, index, sub_handle);
3381 ret = setup_msi_irq(dev, msidesc, irq);
3393 void arch_teardown_msi_irq(unsigned int irq)
3398 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
3401 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
3404 struct irq_cfg *cfg = data->chip_data;
3405 unsigned int dest, irq = data->irq;
3408 if (__ioapic_set_affinity(data, mask, &dest))
3411 dmar_msi_read(irq, &msg);
3413 msg.data &= ~MSI_DATA_VECTOR_MASK;
3414 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3415 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3416 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3418 dmar_msi_write(irq, &msg);
3423 #endif /* CONFIG_SMP */
3425 static struct irq_chip dmar_msi_type = {
3427 .irq_unmask = dmar_msi_unmask,
3428 .irq_mask = dmar_msi_mask,
3429 .irq_ack = ack_apic_edge,
3431 .irq_set_affinity = dmar_msi_set_affinity,
3433 .irq_retrigger = ioapic_retrigger_irq,
3436 int arch_setup_dmar_msi(unsigned int irq)
3441 ret = msi_compose_msg(NULL, irq, &msg, -1);
3444 dmar_msi_write(irq, &msg);
3445 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3451 #ifdef CONFIG_HPET_TIMER
3454 static int hpet_msi_set_affinity(struct irq_data *data,
3455 const struct cpumask *mask, bool force)
3457 struct irq_cfg *cfg = data->chip_data;
3461 if (__ioapic_set_affinity(data, mask, &dest))
3464 hpet_msi_read(data->handler_data, &msg);
3466 msg.data &= ~MSI_DATA_VECTOR_MASK;
3467 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3468 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3469 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3471 hpet_msi_write(data->handler_data, &msg);
3476 #endif /* CONFIG_SMP */
3478 static struct irq_chip ir_hpet_msi_type = {
3479 .name = "IR-HPET_MSI",
3480 .irq_unmask = hpet_msi_unmask,
3481 .irq_mask = hpet_msi_mask,
3482 #ifdef CONFIG_INTR_REMAP
3483 .irq_ack = ir_ack_apic_edge,
3485 .irq_set_affinity = ir_msi_set_affinity,
3488 .irq_retrigger = ioapic_retrigger_irq,
3491 static struct irq_chip hpet_msi_type = {
3493 .irq_unmask = hpet_msi_unmask,
3494 .irq_mask = hpet_msi_mask,
3495 .irq_ack = ack_apic_edge,
3497 .irq_set_affinity = hpet_msi_set_affinity,
3499 .irq_retrigger = ioapic_retrigger_irq,
3502 int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
3507 if (intr_remapping_enabled) {
3508 struct intel_iommu *iommu = map_hpet_to_ir(id);
3514 index = alloc_irte(iommu, irq, 1);
3519 ret = msi_compose_msg(NULL, irq, &msg, id);
3523 hpet_msi_write(get_irq_data(irq), &msg);
3524 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3525 if (irq_remapped(get_irq_chip_data(irq)))
3526 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
3527 handle_edge_irq, "edge");
3529 set_irq_chip_and_handler_name(irq, &hpet_msi_type,
3530 handle_edge_irq, "edge");
3536 #endif /* CONFIG_PCI_MSI */
3538 * Hypertransport interrupt support
3540 #ifdef CONFIG_HT_IRQ
3544 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3546 struct ht_irq_msg msg;
3547 fetch_ht_irq_msg(irq, &msg);
3549 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3550 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3552 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3553 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3555 write_ht_irq_msg(irq, &msg);
3559 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3561 struct irq_cfg *cfg = data->chip_data;
3564 if (__ioapic_set_affinity(data, mask, &dest))
3567 target_ht_irq(data->irq, dest, cfg->vector);
3573 static struct irq_chip ht_irq_chip = {
3575 .irq_mask = mask_ht_irq,
3576 .irq_unmask = unmask_ht_irq,
3577 .irq_ack = ack_apic_edge,
3579 .irq_set_affinity = ht_set_affinity,
3581 .irq_retrigger = ioapic_retrigger_irq,
3584 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3586 struct irq_cfg *cfg;
3593 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3595 struct ht_irq_msg msg;
3598 dest = apic->cpu_mask_to_apicid_and(cfg->domain,
3599 apic->target_cpus());
3601 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3605 HT_IRQ_LOW_DEST_ID(dest) |
3606 HT_IRQ_LOW_VECTOR(cfg->vector) |
3607 ((apic->irq_dest_mode == 0) ?
3608 HT_IRQ_LOW_DM_PHYSICAL :
3609 HT_IRQ_LOW_DM_LOGICAL) |
3610 HT_IRQ_LOW_RQEOI_EDGE |
3611 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3612 HT_IRQ_LOW_MT_FIXED :
3613 HT_IRQ_LOW_MT_ARBITRATED) |
3614 HT_IRQ_LOW_IRQ_MASKED;
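
	/*
	 * As with MSI above: destination, vector, edge-EOI behaviour
	 * and fixed vs. arbitrated delivery are composed into the HT
	 * message, and the entry starts out masked.
	 */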
3616 write_ht_irq_msg(irq, &msg);
3618 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3619 handle_edge_irq, "edge");
3621 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3625 #endif /* CONFIG_HT_IRQ */
3627 int __init io_apic_get_redir_entries(int ioapic)
3629 union IO_APIC_reg_01 reg_01;
3630 unsigned long flags;
3632 raw_spin_lock_irqsave(&ioapic_lock, flags);
3633 reg_01.raw = io_apic_read(ioapic, 1);
3634 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3636 /* The register returns the maximum redirection index supported,
3637 * which is one less than the total number of redirection entries.
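 * (A 24-pin IOAPIC, for instance, reports 23 in the register, and we
 * return 24.)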
3640 return reg_01.bits.entries + 1;
3643 void __init probe_nr_irqs_gsi(void)
3647 nr = gsi_top + NR_IRQS_LEGACY;
3648 if (nr > nr_irqs_gsi)
3651 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3654 #ifdef CONFIG_SPARSE_IRQ
3655 int __init arch_probe_nr_irqs(void)
3659 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3660 nr_irqs = NR_VECTORS * nr_cpu_ids;
3662 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3663 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3665 * for MSI and HT dynamic irqs
3667 nr += nr_irqs_gsi * 16;
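
	/*
	 * Illustrative arithmetic: with 24 GSIs and 4 possible cpus,
	 * nr works out to 24 + 8*4 + 24*16 = 440 possible irqs.
	 */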
3672 return NR_IRQS_LEGACY;
3676 static int __io_apic_set_pci_routing(struct device *dev, int irq,
3677 struct io_apic_irq_attr *irq_attr)
3679 struct irq_cfg *cfg;
3682 int trigger, polarity;
3684 ioapic = irq_attr->ioapic;
3685 if (!IO_APIC_IRQ(irq)) {
3686 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3692 node = dev_to_node(dev);
3694 node = cpu_to_node(0);
3696 cfg = alloc_irq_and_cfg_at(irq, node);
3700 pin = irq_attr->ioapic_pin;
3701 trigger = irq_attr->trigger;
3702 polarity = irq_attr->polarity;
3705 * IRQs < 16 are already in the irq_2_pin[] map
3707 if (irq >= legacy_pic->nr_legacy_irqs) {
3708 if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) {
3709 printk(KERN_INFO "cannot add pin %d for irq %d\n",
3715 setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity);
3720 int io_apic_set_pci_routing(struct device *dev, int irq,
3721 struct io_apic_irq_attr *irq_attr)
3725 * Avoid pin reprogramming. PRTs typically include entries
3726 * with redundant pin->gsi mappings (but unique PCI devices);
3727 * we only program the IOAPIC on the first.
3729 ioapic = irq_attr->ioapic;
3730 pin = irq_attr->ioapic_pin;
3731 if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
3732 pr_debug("Pin %d-%d already programmed\n",
3733 mp_ioapics[ioapic].apicid, pin);
3736 set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);
3738 return __io_apic_set_pci_routing(dev, irq, irq_attr);
3741 u8 __init io_apic_unique_id(u8 id)
3743 #ifdef CONFIG_X86_32
3744 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
3745 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
3746 return io_apic_get_unique_id(nr_ioapics, id);
3751 DECLARE_BITMAP(used, 256);
3753 bitmap_zero(used, 256);
3754 for (i = 0; i < nr_ioapics; i++) {
3755 struct mpc_ioapic *ia = &mp_ioapics[i];
3756 __set_bit(ia->apicid, used);
3758 if (!test_bit(id, used))
3760 return find_first_zero_bit(used, 256);
3764 #ifdef CONFIG_X86_32
3765 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3767 union IO_APIC_reg_00 reg_00;
3768 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3770 unsigned long flags;
3774 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3775 * buses (one for LAPICs, one for IOAPICs), where its predecessors
3776 * only support up to 16 on one shared APIC bus.
3778 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3779 * advantage of new APIC bus architecture.
3782 if (physids_empty(apic_id_map))
3783 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
3785 raw_spin_lock_irqsave(&ioapic_lock, flags);
3786 reg_00.raw = io_apic_read(ioapic, 0);
3787 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3789 if (apic_id >= get_physical_broadcast()) {
3790 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3791 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3792 apic_id = reg_00.bits.ID;
3796 * Every APIC in a system must have a unique ID or we get lots of nice
3797 * 'stuck on smp_invalidate_needed IPI wait' messages.
3799 if (apic->check_apicid_used(&apic_id_map, apic_id)) {
3801 for (i = 0; i < get_physical_broadcast(); i++) {
3802 if (!apic->check_apicid_used(&apic_id_map, i))
3806 if (i == get_physical_broadcast())
3807 panic("Max apic_id exceeded!\n");
3809 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3810 "trying %d\n", ioapic, apic_id, i);
3815 apic->apicid_to_cpu_present(apic_id, &tmp);
3816 physids_or(apic_id_map, apic_id_map, tmp);
3818 if (reg_00.bits.ID != apic_id) {
3819 reg_00.bits.ID = apic_id;
3821 raw_spin_lock_irqsave(&ioapic_lock, flags);
3822 io_apic_write(ioapic, 0, reg_00.raw);
3823 reg_00.raw = io_apic_read(ioapic, 0);
3824 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3827 if (reg_00.bits.ID != apic_id) {
3828 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
3833 apic_printk(APIC_VERBOSE, KERN_INFO
3834 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3840 int __init io_apic_get_version(int ioapic)
3842 union IO_APIC_reg_01 reg_01;
3843 unsigned long flags;
3845 raw_spin_lock_irqsave(&ioapic_lock, flags);
3846 reg_01.raw = io_apic_read(ioapic, 1);
3847 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3849 return reg_01.bits.version;
3852 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
3854 int ioapic, pin, idx;
3856 if (skip_ioapic_setup)
3859 ioapic = mp_find_ioapic(gsi);
3863 pin = mp_find_ioapic_pin(ioapic, gsi);
3867 idx = find_irq_entry(ioapic, pin, mp_INT);
3871 *trigger = irq_trigger(idx);
3872 *polarity = irq_polarity(idx);
3877 * This function is currently only a helper for the i386 SMP boot process,
3878 * where we need to reprogram the ioredtbls to cater for the cpus which have
3879 * come online, so the mask in all cases should simply be apic->target_cpus()
3882 void __init setup_ioapic_dest(void)
3884 int pin, ioapic, irq, irq_entry;
3885 struct irq_desc *desc;
3886 const struct cpumask *mask;
3888 if (skip_ioapic_setup == 1)
3891 for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
3892 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3893 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3894 if (irq_entry == -1)
3896 irq = pin_2_irq(irq_entry, ioapic, pin);
3898 if ((ioapic > 0) && (irq > 16))
3901 desc = irq_to_desc(irq);
3904 * Honour affinities which have been set in early boot
3907 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
3908 mask = desc->irq_data.affinity;
3910 mask = apic->target_cpus();
3912 if (intr_remapping_enabled)
3913 ir_ioapic_set_affinity(&desc->irq_data, mask, false);
3915 ioapic_set_affinity(&desc->irq_data, mask, false);
3921 #define IOAPIC_RESOURCE_NAME_SIZE 11
3923 static struct resource *ioapic_resources;
3925 static struct resource * __init ioapic_setup_resources(int nr_ioapics)
3928 struct resource *res;
3932 if (nr_ioapics <= 0)
3935 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3938 mem = alloc_bootmem(n);
3941 mem += sizeof(struct resource) * nr_ioapics;
3943 for (i = 0; i < nr_ioapics; i++) {
3945 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3946 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
3947 mem += IOAPIC_RESOURCE_NAME_SIZE;
3950 ioapic_resources = res;
3955 void __init ioapic_init_mappings(void)
3957 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3958 struct resource *ioapic_res;
3961 ioapic_res = ioapic_setup_resources(nr_ioapics);
3962 for (i = 0; i < nr_ioapics; i++) {
3963 if (smp_found_config) {
3964 ioapic_phys = mp_ioapics[i].apicaddr;
3965 #ifdef CONFIG_X86_32
3968 "WARNING: bogus zero IO-APIC "
3969 "address found in MPTABLE, "
3970 "disabling IO/APIC support!\n");
3971 smp_found_config = 0;
3972 skip_ioapic_setup = 1;
3973 goto fake_ioapic_page;
3977 #ifdef CONFIG_X86_32
3980 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
3981 ioapic_phys = __pa(ioapic_phys);
3983 set_fixmap_nocache(idx, ioapic_phys);
3984 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
3985 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
3989 ioapic_res->start = ioapic_phys;
3990 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
3995 void __init ioapic_insert_resources(void)
3998 struct resource *r = ioapic_resources;
4003 "IO APIC resources couldn't be allocated.\n");
4007 for (i = 0; i < nr_ioapics; i++) {
4008 insert_resource(&iomem_resource, r);
4013 int mp_find_ioapic(u32 gsi)
4017 /* Find the IOAPIC that manages this GSI. */
4018 for (i = 0; i < nr_ioapics; i++) {
4019 if ((gsi >= mp_gsi_routing[i].gsi_base)
4020 && (gsi <= mp_gsi_routing[i].gsi_end))
4024 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
4028 int mp_find_ioapic_pin(int ioapic, u32 gsi)
4030 if (WARN_ON(ioapic == -1))
4032 if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
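	/* E.g. GSI 19 on an IOAPIC whose gsi_base is 16 maps to pin 3. */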
4035 return gsi - mp_gsi_routing[ioapic].gsi_base;
4038 static int bad_ioapic(unsigned long address)
4040 if (nr_ioapics >= MAX_IO_APICS) {
4041 printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
4042 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
4046 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
4047 " found in table, skipping!\n");
4053 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
4058 if (bad_ioapic(address))
4063 mp_ioapics[idx].type = MP_IOAPIC;
4064 mp_ioapics[idx].flags = MPC_APIC_USABLE;
4065 mp_ioapics[idx].apicaddr = address;
4067 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
4068 mp_ioapics[idx].apicid = io_apic_unique_id(id);
4069 mp_ioapics[idx].apicver = io_apic_get_version(idx);
4072 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
4073 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
4075 entries = io_apic_get_redir_entries(idx);
4076 mp_gsi_routing[idx].gsi_base = gsi_base;
4077 mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;
4080 * The number of IO-APIC IRQ registers (== #pins):
4082 nr_ioapic_registers[idx] = entries;
4084 if (mp_gsi_routing[idx].gsi_end >= gsi_top)
4085 gsi_top = mp_gsi_routing[idx].gsi_end + 1;
4087 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
4088 "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
4089 mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
4090 mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);
4095 /* Enable IOAPIC early just for system timer */
4096 void __init pre_init_apic_IRQ0(void)
4098 struct irq_cfg *cfg;
4100 printk(KERN_INFO "Early APIC setup for system timer0\n");
4102 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
4104 /* Make sure the irq descriptor is set up */
4105 cfg = alloc_irq_and_cfg_at(0, 0);
4109 add_pin_to_irq_node(cfg, 0, 0, 0);
4110 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
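	/*
	 * IOAPIC 0, pin 0, IRQ 0; trigger 0 and polarity 0 ask for an
	 * edge-triggered, active-high entry for the timer.
	 */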
4112 setup_ioapic_irq(0, 0, 0, cfg, 0, 0);