#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2))
		disable_intremap = 0;
	else if (!strncmp(str, "off", 3))
		disable_intremap = 1;
	else if (!strncmp(str, "nosid", 5))
		disable_sourceid_checking = 1;
	return 0;
}
early_param("intremap", setup_intremap);

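/*
 * Per-IRQ bookkeeping: each irq_cfg embeds an irq_2_iommu that records
 * which IOMMU remaps the IRQ and where its IRTE block lives.
 */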
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = get_irq_chip_data(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}

static void irq_2_iommu_free(unsigned int irq)
{
}

static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu || !irq_iommu->iommu)
		return NULL;
	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

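/*
 * Copy the IRTE backing @irq into @entry under irq_2_ir_lock, so the
 * snapshot is consistent with concurrent modify_irte()/free_irte().
 */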
int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int index;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

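/*
 * Allocate a block of @count IRTEs for @irq.  @count is rounded up to a
 * power of two and the matching sub-handle mask is recorded, so a single
 * base index plus sub-handles can address every entry in the block.
 * Returns the base index, or -1 on failure.
 */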
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

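/*
 * Queue a selective Interrupt Entry Cache invalidation covering the
 * IRTEs at @index (@mask low-order index bits wild-carded) and wait for
 * the hardware to complete it.
 */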
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;
	return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int index;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

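/*
 * Point @irq at an already-allocated IRTE block: record @iommu, the base
 * @index and the @subhandle within the block in its irq_2_iommu.
 */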
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

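/*
 * Rewrite the IRTE backing @irq with @irte_modified, flush the entry to
 * memory and invalidate the Interrupt Entry Cache so the hardware sees
 * the update.
 */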
int modify_irte(int irq, struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	struct irte *irte;
	unsigned long flags;
	int index, rc;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;
	return drhd->iommu;
}

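/*
 * Zero every IRTE of the block owned by @irq_iommu and invalidate the
 * Interrupt Entry Cache.  Sub-handle users don't own the block, so they
 * are skipped.
 */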
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

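/*
 * Release the IRTE block owned by @irq and reset its irq_2_iommu
 * bookkeeping.
 */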
int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	irq_2_iommu_free(irq);
	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * Set the SVT, SQ and SID fields of an IRTE so that the hardware
 * verifies the source-ids of interrupt requests.
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
	return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
	return 0;
}

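/*
 * For MSI the source-id depends on topology: PCIe and Root-Complex
 * integrated devices are verified against their own requester-id, while
 * a device behind a PCIe-to-PCI bridge may have its requests issued by
 * the bridge, so only the bus number of the request-id is verified,
 * against the range between the bridge's bus and the device's bus.
 */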
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}

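/*
 * Program the remapping table address into DMAR_IRTA_REG, latch it with
 * SIRTP, flush the Interrupt Entry Cache, then set DMA_GCMD_IRE to turn
 * interrupt remapping on.
 */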
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of the interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

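/*
 * Allocate the ir_table descriptor and the page block that holds the
 * IRTEs for @iommu, then enable remapping through it.
 */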
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * Global invalidation of the interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;
	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}
	return 1;
}

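/*
 * Enable interrupt remapping on every DRHD: tear down whatever the
 * firmware left enabled, turn on queued invalidation (needed for IEC
 * flushes), then set up a remapping table per IOMMU.  @eim selects
 * extended interrupt (x2APIC) mode.
 */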
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If queued invalidation is already initialized,
		 * don't disable it.
		 */
		if (iommu->qi)
			continue;

		/* Clear previous faults. */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);
		dmar_disable_qi(iommu);
	}

	/* Check for Interrupt-remapping support. */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;
		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/* Enable queued invalidation for all the DRHDs. */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;
		int ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/* Set up Interrupt-remapping for all the DRHDs now. */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;
		if (setup_intr_remapping(iommu, eim))
			goto error;
		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;
	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	int count;
	u8 bus;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus   = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	int count;
	u8 bus;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_ioapic[ir_ioapic_num].bus   = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
	ir_ioapic_num++;
}

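/*
 * Walk the device scopes of one DRHD and record every IO-APIC and HPET
 * block that this IOMMU remaps.
 */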
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Find the association between IO-APICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;
			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/* Disable Interrupt-remapping for all the DRHDs now. */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}

int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
	int setup = 0;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/* Set up Interrupt-remapping for all the DRHDs now. */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup) {
		/* XXX: handle error condition more gracefully here! */
		return -1;
	}
	return 0;
}