/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"

#define ROOT_SIZE               VTD_PAGE_SIZE
#define CONTEXT_SIZE            VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START      (0xfee00000)
#define IOAPIC_RANGE_END        (0xfeefffff)
#define IOVA_START_ADDR         (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
                                __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
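
/*
 * Worked example: with the default 48-bit width and 4KiB VT-d pages,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1, so a domain can map 2^36
 * pages, i.e. 256TiB of IOVA space (clamped further on 32-bit kernels
 * by the unsigned long cast above).
 */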

/* IO virtual address start page frame number */
#define IOVA_START_PFN          (1)

#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */
#define LEVEL_STRIDE            (9)
#define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
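
/*
 * ~0xFFFUL leaves every bit from bit 12 upwards set, so the mask above
 * advertises every power-of-two size that is a multiple of 4KiB, which
 * is exactly the "all orders of 4KiB" behaviour described above.
 */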

static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
        return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
        return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
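
/*
 * Example of the stride math: each level adds LEVEL_STRIDE (9) address
 * bits, so a level-1 PTE maps one 4KiB page, a level-2 PTE maps 512
 * pages (2MiB) and a level-3 PTE maps 512^2 pages (1GiB).
 */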

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     lo;
        u64     hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
        if (!(re->lo & 1))
                return 0;

        return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
        if (!(re->hi & 1))
                return 0;

        return re->hi & VTD_PAGE_MASK;
}

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
        context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
        return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
        context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
        return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
        return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
        return context_pasid_enabled(context) ?
                __context_present(context) :
                __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo &= ~VTD_PAGE_MASK;
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
        return (c->hi >> 8) & 0xffff;
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
        return pte->val & VTD_PAGE_MASK;
#else
        /* Must have a full atomic 64-bit read */
        return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
        return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
        return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
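
/*
 * first_pte_in_page() works because page tables are exactly one VT-d
 * page in size and page aligned: a PTE pointer whose page-offset bits
 * are all zero must be the first entry of its table.
 */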

/*
 * This domain is a static identity mapping domain.
 *      1. This domain creates a static 1:1 mapping to all usable memory.
 *      2. It maps to each iommu if successful.
 *      3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)

struct dmar_domain {
        int     id;                     /* domain id */
        int     nid;                    /* node id */
        DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
                                        /* bitmap of iommus this domain uses */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature */
        int             iommu_count;    /* reference count of iommu */
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 256TiB */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */

        struct iommu_domain domain;     /* generic domain data structure for
                                           iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;          /* link to domain siblings */
        struct list_head global;        /* link to global list */
        u8 bus;                         /* PCI bus number */
        u8 devfn;                       /* PCI devfn number */
        struct device *dev;             /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu;      /* IOMMU used by this device */
        struct dmar_domain *domain;     /* pointer to domain */
};

struct dmar_rmrr_unit {
        struct list_head list;          /* list of rmrr units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        u64     base_address;           /* reserved base address */
        u64     end_address;            /* reserved end address */
        struct dmar_dev_scope *devices; /* target devices */
        int     devices_cnt;            /* target device count */
};

struct dmar_atsr_unit {
        struct list_head list;          /* list of ATSR units */
        struct acpi_dmar_header *hdr;   /* ACPI header */
        struct dmar_dev_scope *devices; /* target devices */
        int devices_cnt;                /* target device count */
        u8 include_all:1;               /* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
        list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void flush_unmaps_timeout(unsigned long data);

static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
        struct page *freelist[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void domain_remove_one_dev_info(struct dmar_domain *domain,
                                       struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                           struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;

/* We only actually use ECS when PASID support (on the new bit 40)
 * is also advertised. Some early implementations — the ones with
 * PASID support on bit 28 — have issues even when we *only* use
 * extended root/context tables. */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
                            ecap_pasid(iommu->ecap))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
        return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
        iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
        u32 gsts;

        gsts = readl(iommu->reg + DMAR_GSTS_REG);
        if (gsts & DMA_GSTS_TES)
                iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}

/* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct dmar_domain, domain);
}

static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        pr_info("IOMMU enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        pr_info("IOMMU disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        pr_info("Disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        pr_info("Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        pr_info("Disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                } else if (!strncmp(str, "sp_off", 6)) {
                        pr_info("Disable supported super page\n");
                        intel_iommu_superpage = 0;
                } else if (!strncmp(str, "ecs_off", 7)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable extended context table support\n");
                        intel_iommu_ecs = 0;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static inline void *alloc_pgtable_page(int node)
{
        struct page *page;
        void *vaddr = NULL;

        page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (page)
                vaddr = page_address(page);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
        return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

static inline int domain_type_is_vm(struct dmar_domain *domain)
{
        return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
        return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
                                DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
                                       unsigned long pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

        return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
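
/*
 * Example: agaw 2 gives agaw_to_width() == 48, so addr_width is 36;
 * any pfn with bits set above bit 35 lies beyond what the domain's
 * page tables can express and is rejected.
 */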

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, so use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
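
/*
 * Example: for the default 48-bit width, width_to_agaw(48) is
 * DIV_ROUND_UP(48 - 30, 9) == 2, i.e. a 4-level page table
 * (agaw_to_level(2) == 4). If bit 2 is clear in the hardware's SAGAW
 * field, the loop in __iommu_calculate_agaw() falls back to a smaller
 * agaw.
 */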

/* This function only returns the single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain_type_is_vm_or_si(domain));
        iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        bool found = false;
        int i;

        domain->iommu_coherency = 1;

        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
                found = true;
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        if (found)
                return;

        /* No hardware attached; use lowest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (!ecap_coherent(iommu->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
        }
        rcu_read_unlock();
}

static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int ret = 1;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
                        if (!ecap_sc_support(iommu->ecap)) {
                                ret = 0;
                                break;
                        }
                }
        }
        rcu_read_unlock();

        return ret;
}

static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        int mask = 0xf;

        if (!intel_iommu_superpage) {
                return 0;
        }

        /* set iommu_superpage to the smallest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (iommu != skip) {
                        mask &= cap_super_page_val(iommu->cap);
                        if (!mask)
                                break;
                }
        }
        rcu_read_unlock();

        return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
        domain_update_iommu_coherency(domain);
        domain->iommu_snooping = domain_update_iommu_snooping(NULL);
        domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
                                                       u8 bus, u8 devfn, int alloc)
{
        struct root_entry *root = &iommu->root_entry[bus];
        struct context_entry *context;
        u64 *entry;

        entry = &root->lo;
        if (ecs_enabled(iommu)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
                        entry = &root->hi;
                }
                devfn *= 2;
        }
        if (*entry & 1)
                context = phys_to_virt(*entry & VTD_PAGE_MASK);
        else {
                unsigned long phy_addr;
                if (!alloc)
                        return NULL;

                context = alloc_pgtable_page(iommu->node);
                if (!context)
                        return NULL;

                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                *entry = phy_addr | 1;
                __iommu_flush_cache(iommu, entry, sizeof(*entry));
        }
        return &context[devfn];
}

static int iommu_dummy(struct device *dev)
{
        return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
        struct dmar_drhd_unit *drhd = NULL;
        struct intel_iommu *iommu;
        struct device *tmp;
        struct pci_dev *ptmp, *pdev = NULL;
        u16 segment = 0;
        int i;

        if (iommu_dummy(dev))
                return NULL;

        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                segment = pci_domain_nr(pdev->bus);
        } else if (has_acpi_companion(dev))
                dev = &ACPI_COMPANION(dev)->dev;

        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
                if (pdev && segment != drhd->segment)
                        continue;

                for_each_active_dev_scope(drhd->devices,
                                          drhd->devices_cnt, i, tmp) {
                        if (tmp == dev) {
                                *bus = drhd->devices[i].bus;
                                *devfn = drhd->devices[i].devfn;
                                goto out;
                        }

                        if (!pdev || !dev_is_pci(tmp))
                                continue;

                        ptmp = to_pci_dev(tmp);
                        if (ptmp->subordinate &&
                            ptmp->subordinate->number <= pdev->bus->number &&
                            ptmp->subordinate->busn_res.end >= pdev->bus->number)
                                goto got_pdev;
                }

                if (pdev && drhd->include_all) {
                got_pdev:
                        *bus = pdev->bus->number;
                        *devfn = pdev->devfn;
                        goto out;
                }
        }
        iommu = NULL;
out:
        rcu_read_unlock();

        return iommu;
}

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct context_entry *context;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (context)
                ret = context_present(context);
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (context) {
                context_clear_entry(context);
                __iommu_flush_cache(iommu, context, sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry) {
                goto out;
        }
        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                context = iommu_context_addr(iommu, i, 0, 0);
                if (context)
                        free_pgtable_page(context);

                if (!ecs_enabled(iommu))
                        continue;

                context = iommu_context_addr(iommu, i, 0x80, 0);
                if (context)
                        free_pgtable_page(context);
        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn, int *target_level)
{
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;

        BUG_ON(!domain->pgd);

        if (!domain_pfn_supported(domain, pfn))
                /* Address beyond IOMMU's addressing capabilities. */
                return NULL;

        parent = domain->pgd;

        while (1) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
                        break;
                if (level == *target_level)
                        break;

                if (!dma_pte_present(pte)) {
                        uint64_t pteval;

                        tmp_page = alloc_pgtable_page(domain->nid);
                        if (!tmp_page)
                                return NULL;

                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
                        else
                                domain_flush_cache(domain, pte, sizeof(*pte));
                }
                if (level == 1)
                        break;

                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        if (!*target_level)
                *target_level = level;

        return pte;
}
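
/*
 * Note on the walk above: it descends from the top level towards the
 * target level, allocating missing intermediate tables on the way; the
 * cmpxchg64() lets two CPUs race to populate the same slot without a
 * lock, the loser simply freeing its speculatively allocated page.
 */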

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level, int *large_page)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte)) {
                        *large_page = total;
                        break;
                }

                if (dma_pte_superpage(pte)) {
                        *large_page = total;
                        return pte;
                }

                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}

/* clear last level pte; a tlb flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        unsigned int large_page = 1;
        struct dma_pte *first_pte, *pte;

        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        do {
                large_page = 1;
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, large_page + 1);
                        continue;
                }
                do {
                        dma_clear_pte(pte);
                        start_pfn += lvl_to_nr_pages(large_page);
                        pte++;
                } while (start_pfn <= last_pfn && !first_pte_in_page(pte));

                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);

        } while (start_pfn && start_pfn <= last_pfn);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
                               struct dma_pte *pte, unsigned long pfn,
                               unsigned long start_pfn, unsigned long last_pfn)
{
        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;
                struct dma_pte *level_pte;

                if (!dma_pte_present(pte) || dma_pte_superpage(pte))
                        goto next;

                level_pfn = pfn & level_mask(level - 1);
                level_pte = phys_to_virt(dma_pte_addr(pte));

                if (level > 2)
                        dma_pte_free_level(domain, level - 1, level_pte,
                                           level_pfn, start_pfn, last_pfn);

                /* If range covers entire pagetable, free it */
                if (!(start_pfn > level_pfn ||
                      last_pfn < level_pfn + level_size(level) - 1)) {
                        dma_clear_pte(pte);
                        domain_flush_cache(domain, pte, sizeof(*pte));
                        free_pgtable_page(level_pte);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        dma_pte_clear_range(domain, start_pfn, last_pfn);

        /* We don't need lock here; nobody else touches the iova range */
        dma_pte_free_level(domain, agaw_to_level(domain->agaw),
                           domain->pgd, 0, start_pfn, last_pfn);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
                                            int level, struct dma_pte *pte,
                                            struct page *freelist)
{
        struct page *pg;

        pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
        pg->freelist = freelist;
        freelist = pg;

        if (level == 1)
                return freelist;

        pte = page_address(pg);
        do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                        freelist = dma_pte_list_pagetables(domain, level - 1,
                                                           pte, freelist);
                pte++;
        } while (!first_pte_in_page(pte));

        return freelist;
}

static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
                                        struct dma_pte *pte, unsigned long pfn,
                                        unsigned long start_pfn,
                                        unsigned long last_pfn,
                                        struct page *freelist)
{
        struct dma_pte *first_pte = NULL, *last_pte = NULL;

        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];

        do {
                unsigned long level_pfn;

                if (!dma_pte_present(pte))
                        goto next;

                level_pfn = pfn & level_mask(level);

                /* If range covers entire pagetable, free it */
                if (start_pfn <= level_pfn &&
                    last_pfn >= level_pfn + level_size(level) - 1) {
                        /* These subordinate page tables are going away entirely. Don't
                           bother to clear them; we're just going to *free* them. */
                        if (level > 1 && !dma_pte_superpage(pte))
                                freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);

                        dma_clear_pte(pte);
                        if (!first_pte)
                                first_pte = pte;
                        last_pte = pte;
                } else if (level > 1) {
                        /* Recurse down into a level that isn't *entirely* obsolete */
                        freelist = dma_pte_clear_level(domain, level - 1,
                                                       phys_to_virt(dma_pte_addr(pte)),
                                                       level_pfn, start_pfn, last_pfn,
                                                       freelist);
                }
next:
                pfn += level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);

        if (first_pte)
                domain_flush_cache(domain, first_pte,
                                   (void *)++last_pte - (void *)first_pte);

        return freelist;
}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
struct page *domain_unmap(struct dmar_domain *domain,
                          unsigned long start_pfn,
                          unsigned long last_pfn)
{
        struct page *freelist = NULL;

        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
                                       domain->pgd, 0, start_pfn, last_pfn, NULL);

        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                struct page *pgd_page = virt_to_page(domain->pgd);
                pgd_page->freelist = freelist;
                freelist = pgd_page;

                domain->pgd = NULL;
        }

        return freelist;
}

void dma_free_pagelist(struct page *freelist)
{
        struct page *pg;

        while ((pg = freelist)) {
                freelist = pg->freelist;
                free_pgtable_page(page_address(pg));
        }
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root) {
                pr_err("Allocating root entry for %s failed\n",
                       iommu->name);
                return -ENOMEM;
        }

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        u64 addr;
        u32 sts;
        unsigned long flag;

        addr = virt_to_phys(iommu->root_entry);
        if (ecs_enabled(iommu))
                addr |= DMA_RTADDR_RTT;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
                                  u16 did, u16 source_id, u8 function_mask,
                                  u64 type)
{
        u64 val = 0;
        unsigned long flag;

        switch (type) {
        case DMA_CCMD_GLOBAL_INVL:
                val = DMA_CCMD_GLOBAL_INVL;
                break;
        case DMA_CCMD_DOMAIN_INVL:
                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
                break;
        case DMA_CCMD_DEVICE_INVL:
                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
                        | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
                break;
        default:
                BUG();
        }
        val |= DMA_CCMD_ICC;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                                u64 addr, unsigned int size_order, u64 type)
{
        int tlb_offset = ecap_iotlb_offset(iommu->ecap);
        u64 val = 0, val_iva = 0;
        unsigned long flag;

        switch (type) {
        case DMA_TLB_GLOBAL_FLUSH:
                /* global flush doesn't need to set IVA_REG */
                val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
                break;
        case DMA_TLB_DSI_FLUSH:
                val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                break;
        case DMA_TLB_PSI_FLUSH:
                val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                /* IH bit is passed in as part of address */
                val_iva = size_order | addr;
                break;
        default:
                BUG();
        }
        /* Note: set drain read/write */
#if 0
        /*
         * This is probably to be extra safe. Looks like we can
         * ignore it without any impact.
         */
        if (cap_read_drain(iommu->cap))
                val |= DMA_TLB_READ_DRAIN;
#endif
        if (cap_write_drain(iommu->cap))
                val |= DMA_TLB_WRITE_DRAIN;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        /* Note: Only uses first TLB reg currently */
        if (val_iva)
                dmar_writeq(iommu->reg + tlb_offset, val_iva);
        dmar_writeq(iommu->reg + tlb_offset + 8, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, tlb_offset + 8,
                      dmar_readq, (!(val & DMA_TLB_IVT)), val);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
                pr_err("Flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("TLB flush request %Lx, actual %Lx\n",
                         (unsigned long long)DMA_TLB_IIRG(type),
                         (unsigned long long)DMA_TLB_IAIG(val));
}

static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
                        u8 bus, u8 devfn)
{
        bool found = false;
        unsigned long flags;
        struct device_domain_info *info;
        struct pci_dev *pdev;

        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;

        if (!iommu->qi)
                return NULL;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->iommu == iommu && info->bus == bus &&
                    info->devfn == devfn) {
                        found = true;
                        break;
                }
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev || !dev_is_pci(info->dev))
                return NULL;

        pdev = to_pci_dev(info->dev);

        if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
                return NULL;

        if (!dmar_find_matched_atsr_unit(pdev))
                return NULL;

        return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
        if (!info || !dev_is_pci(info->dev))
                return;

        pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
        if (!info->dev || !dev_is_pci(info->dev) ||
            !pci_ats_enabled(to_pci_dev(info->dev)))
                return;

        pci_disable_ats(to_pci_dev(info->dev));
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
{
        u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                struct pci_dev *pdev;
                if (!info->dev || !dev_is_pci(info->dev))
                        continue;

                pdev = to_pci_dev(info->dev);
                if (!pci_ats_enabled(pdev))
                        continue;

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(pdev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  unsigned long pfn, unsigned int pages, int ih, int map)
{
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

        BUG_ON(pages == 0);

        if (ih)
                ih = 1 << 6;
        /*
         * Fallback to domain selective flush if no PSI support or the size is
         * too big.
         * PSI requires the page size to be 2 ^ x, and the base address to be
         * naturally aligned to the size.
         */
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                         DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
                                         DMA_TLB_PSI_FLUSH);

        /*
         * In caching mode, changes of pages from non-present to present require
         * a flush. However, device IOTLB doesn't need to be flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || !map)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
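
/*
 * Example of the mask math in iommu_flush_iotlb_psi(): flushing 9
 * pages gives mask = ilog2(__roundup_pow_of_two(9)) == 4, i.e. a
 * 16-page naturally aligned region; over-invalidation is the price of
 * the power-of-two restriction on PSI.
 */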

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
        u32 pmen;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                      readl, !(pmen & DMA_PMEN_PRS), pmen);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_enable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_TES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flag;

        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(sts & DMA_GSTS_TES)), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("%s: Number of Domains supported <%ld>\n",
                 iommu->name, ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        spin_lock_init(&iommu->lock);

        /* TBD: there might be 64K domains,
         * consider other allocation for future chips
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                pr_err("%s: Allocating domain id array failed\n",
                       iommu->name);
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                                 GFP_KERNEL);
        if (!iommu->domains) {
                pr_err("%s: Allocating domain array failed\n",
                       iommu->name);
                kfree(iommu->domain_ids);
                iommu->domain_ids = NULL;
                return -ENOMEM;
        }

        /*
         * If Caching mode is set, then invalid translations are tagged
         * with domain id 0. Hence we need to pre-allocate it.
         */
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);

        return 0;
}

static void disable_dmar_iommu(struct intel_iommu *iommu)
{
        struct dmar_domain *domain;
        int i;

        if ((iommu->domains) && (iommu->domain_ids)) {
                for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
                        /*
                         * Domain id 0 is reserved for invalid translation
                         * if hardware supports caching mode.
                         */
                        if (cap_caching_mode(iommu->cap) && i == 0)
                                continue;

                        domain = iommu->domains[i];
                        clear_bit(i, iommu->domain_ids);
                        if (domain_detach_iommu(domain, iommu) == 0 &&
                            !domain_type_is_vm(domain))
                                domain_exit(domain);
                }
        }

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);
}

static void free_dmar_iommu(struct intel_iommu *iommu)
{
        if ((iommu->domains) && (iommu->domain_ids)) {
                kfree(iommu->domains);
                kfree(iommu->domain_ids);
                iommu->domains = NULL;
                iommu->domain_ids = NULL;
        }

        g_iommus[iommu->seq_id] = NULL;

        /* free context mapping */
        free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(int flags)
{
        /* domain id for virtual machine, it won't be set in context */
        static atomic_t vm_domid = ATOMIC_INIT(0);
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        memset(domain, 0, sizeof(*domain));
        domain->nid = -1;
        domain->flags = flags;
        spin_lock_init(&domain->iommu_lock);
        INIT_LIST_HEAD(&domain->devices);
        if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                domain->id = atomic_inc_return(&vm_domid);

        return domain;
}

static int __iommu_attach_domain(struct dmar_domain *domain,
                                 struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;

        ndomains = cap_ndoms(iommu->cap);
        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num < ndomains) {
                set_bit(num, iommu->domain_ids);
                iommu->domains[num] = domain;
        } else {
                num = -ENOSPC;
        }

        return num;
}

static int iommu_attach_domain(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        int num;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        num = __iommu_attach_domain(domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
        if (num < 0)
                pr_err("%s: No free domain ids\n", iommu->name);

        return num;
}

static int iommu_attach_vm_domain(struct dmar_domain *domain,
                                  struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;

        ndomains = cap_ndoms(iommu->cap);
        for_each_set_bit(num, iommu->domain_ids, ndomains)
                if (iommu->domains[num] == domain)
                        return num;

        return __iommu_attach_domain(domain, iommu);
}

static void iommu_detach_domain(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;
        int num, ndomains;

        spin_lock_irqsave(&iommu->lock, flags);
        if (domain_type_is_vm_or_si(domain)) {
                ndomains = cap_ndoms(iommu->cap);
                for_each_set_bit(num, iommu->domain_ids, ndomains) {
                        if (iommu->domains[num] == domain) {
                                clear_bit(num, iommu->domain_ids);
                                iommu->domains[num] = NULL;
                                break;
                        }
                }
        } else {
                clear_bit(domain->id, iommu->domain_ids);
                iommu->domains[domain->id] = NULL;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void domain_attach_iommu(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
                domain->iommu_count++;
                if (domain->iommu_count == 1)
                        domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
}

static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        unsigned long flags;
        int count = INT_MAX;

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
                count = --domain->iommu_count;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);

        return count;
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static int dmar_init_reserved_ranges(void)
{
        struct pci_dev *pdev = NULL;
        struct iova *iova;
        int i;

        init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);

        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                          &reserved_rbtree_key);

        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                            IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova) {
                pr_err("Reserve IOAPIC range failed\n");
                return -ENODEV;
        }

        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
                struct resource *r;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        r = &pdev->resource[i];
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        iova = reserve_iova(&reserved_iova_list,
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
                        if (!iova) {
                                pr_err("Reserve iova failed\n");
                                return -ENODEV;
                        }
                }
        }
        return 0;
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
        copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
        int agaw;
        int r = (gaw - 12) % 9;

        if (r == 0)
                agaw = gaw;
        else
                agaw = gaw + 9 - r;
        if (agaw > 64)
                agaw = 64;
        return agaw;
}
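
/*
 * guestwidth_to_adjustwidth() rounds the guest width up to the next
 * value that lands on a page-table level boundary: 12 bits of page
 * offset plus a multiple of the 9-bit stride. e.g. a 40-bit guest
 * width is adjusted up to 48 bits.
 */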

static int domain_init(struct dmar_domain *domain, int guest_width)
{
        struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;

        init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
                         DMA_32BIT_PFN);
        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        agaw = width_to_agaw(adjust_width);
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
                pr_debug("Hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
        }
        domain->agaw = agaw;

        if (ecap_coherent(iommu->ecap))
                domain->iommu_coherency = 1;
        else
                domain->iommu_coherency = 0;

        if (ecap_sc_support(iommu->ecap))
                domain->iommu_snooping = 1;
        else
                domain->iommu_snooping = 0;

        if (intel_iommu_superpage)
                domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
        else
                domain->iommu_superpage = 0;

        domain->nid = iommu->node;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
        struct page *freelist = NULL;
        int i;

        /* Domain 0 is reserved, so don't process it */
        if (!domain)
                return;

        /* Flush any lazy unmaps that may reference this domain */
        if (!intel_iommu_strict)
                flush_unmaps_timeout(0);

        /* remove associated devices */
        domain_remove_dev_info(domain);

        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* clear attached or cached domains */
        rcu_read_lock();
        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
                iommu_detach_domain(domain, g_iommus[i]);
        rcu_read_unlock();

        dma_free_pagelist(freelist);

        free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      u8 bus, u8 devfn, int translation)
{
        struct context_entry *context;
        unsigned long flags;
        struct dma_pte *pgd;
        int id;
        int agaw;
        struct device_domain_info *info = NULL;

        pr_debug("Set context mapping for %02x:%02x.%d\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        BUG_ON(!domain->pgd);
        BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
               translation != CONTEXT_TT_MULTI_LEVEL);

        spin_lock_irqsave(&iommu->lock, flags);
        context = iommu_context_addr(iommu, bus, devfn, 1);
        spin_unlock_irqrestore(&iommu->lock, flags);
        if (!context)
                return -ENOMEM;

        spin_lock_irqsave(&iommu->lock, flags);
        if (context_present(context)) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                return 0;
        }

        context_clear_entry(context);

        id = domain->id;
        pgd = domain->pgd;

        if (domain_type_is_vm_or_si(domain)) {
                if (domain_type_is_vm(domain)) {
                        id = iommu_attach_vm_domain(domain, iommu);
                        if (id < 0) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                pr_err("%s: No free domain ids\n", iommu->name);
                                return -EFAULT;
                        }
                }

                /* Skip top levels of page tables for
                 * iommu which has less agaw than default.
                 * Unnecessary for PT mode.
                 */
                if (translation != CONTEXT_TT_PASS_THROUGH) {
                        for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
                                pgd = phys_to_virt(dma_pte_addr(pgd));
                                if (!dma_pte_present(pgd)) {
                                        spin_unlock_irqrestore(&iommu->lock, flags);
                                        return -ENOMEM;
                                }
                        }
                }
        }

        context_set_domain_id(context, id);

        if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;
        }
        /*
         * In pass through mode, AW must be programmed to indicate the largest
         * AGAW value supported by hardware. And ASR is ignored by hardware.
         */
        if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
                context_set_address_width(context, iommu->msagaw);
        else {
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);
        }

        context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));

        /*
         * It's a non-present to present mapping. If hardware doesn't cache
         * non-present entries, we only need to flush the write-buffer. If it
         * _does_ cache non-present entries, then it does so in the special
         * domain #0, which we have to flush:
         */
        if (cap_caching_mode(iommu->cap)) {
                iommu->flush.flush_context(iommu, 0,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
                iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
        spin_unlock_irqrestore(&iommu->lock, flags);

        domain_attach_iommu(domain, iommu);

        return 0;
}

struct domain_context_mapping_data {
        struct dmar_domain *domain;
        struct intel_iommu *iommu;
        int translation;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
                                     u16 alias, void *opaque)
{
        struct domain_context_mapping_data *data = opaque;

        return domain_context_mapping_one(data->domain, data->iommu,
                                          PCI_BUS_NUM(alias), alias & 0xff,
                                          data->translation);
}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev,
                       int translation)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;
        struct domain_context_mapping_data data;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        if (!dev_is_pci(dev))
                return domain_context_mapping_one(domain, iommu, bus, devfn,
                                                  translation);

        data.domain = domain;
        data.iommu = iommu;
        data.translation = translation;

        return pci_for_each_dma_alias(to_pci_dev(dev),
                                      &domain_context_mapping_cb, &data);
}

static int domain_context_mapped_cb(struct pci_dev *pdev,
                                    u16 alias, void *opaque)
{
        struct intel_iommu *iommu = opaque;

        return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}

static int domain_context_mapped(struct device *dev)
{
        struct intel_iommu *iommu;
        u8 bus, devfn;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        if (!dev_is_pci(dev))
                return device_context_mapped(iommu, bus, devfn);

        return !pci_for_each_dma_alias(to_pci_dev(dev),
                                       domain_context_mapped_cb, iommu);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
{
        host_addr &= ~PAGE_MASK;
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
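
/*
 * Example: with 4KiB MM pages, host_addr offset 0x200 and size 0x1000
 * give PAGE_ALIGN(0x1200) == 0x2000, i.e. two VT-d pages: a partial
 * page at either end still costs a full page of IOVA space.
 */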

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
                                          unsigned long iov_pfn,
                                          unsigned long phy_pfn,
                                          unsigned long pages)
{
        int support, level = 1;
        unsigned long pfnmerge;

        support = domain->iommu_superpage;

        /* To use a large page, the virtual *and* physical addresses
           must be aligned to 2MiB/1GiB/etc. Lower bits set in either
           of them will mean we have to use smaller pages. So just
           merge them and check both at once. */
        pfnmerge = iov_pfn | phy_pfn;

        while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
                pages >>= VTD_STRIDE_SHIFT;
                if (!pages)
                        break;
                pfnmerge >>= VTD_STRIDE_SHIFT;
                level++;
                support--;
        }
        return level;
}
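
/*
 * Example: iov_pfn and phy_pfn both 2MiB aligned (low 9 pfn bits
 * clear) with pages >= 512 allow level 2, i.e. one 2MiB superpage PTE
 * instead of 512 individual 4KiB PTEs.
 */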

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                            struct scatterlist *sg, unsigned long phys_pfn,
                            unsigned long nr_pages, int prot)
{
        struct dma_pte *first_pte = NULL, *pte = NULL;
        phys_addr_t uninitialized_var(pteval);
        unsigned long sg_res = 0;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;

        BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;

        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

        if (!sg) {
                sg_res = nr_pages;
                pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
        }

        while (nr_pages > 0) {
                uint64_t tmp;

                if (!sg_res) {
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
                        pteval = page_to_phys(sg_page(sg)) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }

                if (!pte) {
                        largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);

                        first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
                        if (!pte)
                                return -ENOMEM;
                        /* It is a large page */
                        if (largepage_lvl > 1) {
                                pteval |= DMA_PTE_LARGE_PAGE;
                                lvl_pages = lvl_to_nr_pages(largepage_lvl);
                                /*
                                 * Ensure that old small page tables are
                                 * removed to make room for superpage,
                                 * if they exist.
                                 */
                                dma_pte_free_pagetable(domain, iov_pfn,
                                                       iov_pfn + lvl_pages - 1);
                        } else {
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
                        }

                }
                /* We don't need lock here, nobody else
                 * touches the iova range
                 */
                tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
                if (tmp) {
                        static int dumps = 5;
                        pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
                                iov_pfn, tmp, (unsigned long long)pteval);
                        if (dumps) {
                                dumps--;
                                debug_dma_dump_mappings(NULL);
                        }
                        WARN_ON(1);
                }

                lvl_pages = lvl_to_nr_pages(largepage_lvl);

                BUG_ON(nr_pages < lvl_pages);
                BUG_ON(sg_res < lvl_pages);

                nr_pages -= lvl_pages;
                iov_pfn += lvl_pages;
                phys_pfn += lvl_pages;
                pteval += lvl_pages * VTD_PAGE_SIZE;
                sg_res -= lvl_pages;

                /* If the next PTE would be the first in a new page, then we
                   need to flush the cache on the entries we've just written.
                   And then we'll need to recalculate 'pte', so clear it and
                   let it get set again in the if (!pte) block above.

                   If we're done (!nr_pages) we need to flush the cache too.

                   Also if we've been setting superpages, we may need to
                   recalculate 'pte' and switch back to smaller pages for the
                   end of the mapping, if the trailing size is not enough to
                   use another superpage (i.e. sg_res < lvl_pages). */
                pte++;
                if (!nr_pages || first_pte_in_page(pte) ||
                    (largepage_lvl > 1 && sg_res < lvl_pages)) {
                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);
                        pte = NULL;
                }

                if (!sg_res && nr_pages)
                        sg = sg_next(sg);
        }
        return 0;
}

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                    struct scatterlist *sg, unsigned long nr_pages,
                                    int prot)
{
        return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                     unsigned long phys_pfn, unsigned long nr_pages,
                                     int prot)
{
        return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        if (!iommu)
                return;

        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
                                   DMA_CCMD_GLOBAL_INVL);
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
{
        assert_spin_locked(&device_domain_lock);
        list_del(&info->link);
        list_del(&info->global);
        if (info->dev)
                info->dev->archdata.iommu = NULL;
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
        struct device_domain_info *info, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry_safe(info, tmp, &domain->devices, link) {
                unlink_domain_info(info);
                spin_unlock_irqrestore(&device_domain_lock, flags);

                iommu_disable_dev_iotlb(info);
                iommu_detach_dev(info->iommu, info->bus, info->devfn);

                if (domain_type_is_vm(domain)) {
                        iommu_detach_dependent_devices(info->iommu, info->dev);
                        domain_detach_iommu(domain, info->iommu);
                }

                free_devinfo_mem(info);
                spin_lock_irqsave(&device_domain_lock, flags);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: struct device->archdata.iommu stores the device_domain_info
 */
static struct dmar_domain *find_domain(struct device *dev)
{
        struct device_domain_info *info;

        /* No lock here, assumes no domain exit in normal case */
        info = dev->archdata.iommu;
        if (info)
                return info->domain;
        return NULL;
}

static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
        struct device_domain_info *info;

        list_for_each_entry(info, &device_domain_list, global)
                if (info->iommu->segment == segment && info->bus == bus &&
                    info->devfn == devfn)
                        return info;

        return NULL;
}

static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
                                                int bus, int devfn,
                                                struct device *dev,
                                                struct dmar_domain *domain)
{
        struct dmar_domain *found = NULL;
        struct device_domain_info *info;
        unsigned long flags;

        info = alloc_devinfo_mem();
        if (!info)
                return NULL;

        info->bus = bus;
        info->devfn = devfn;
        info->dev = dev;
        info->domain = domain;
        info->iommu = iommu;

        spin_lock_irqsave(&device_domain_lock, flags);
        if (dev)
                found = find_domain(dev);
        else {
                struct device_domain_info *info2;
                info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
                if (info2)
                        found = info2->domain;
        }
        if (found) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                free_devinfo_mem(info);
                /* Caller must free the original domain */
                return found;
        }

        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        if (dev)
                dev->archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);

        return domain;
}

static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
        *(u16 *)opaque = alias;
        return 0;
}

/* domain is initialized */
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
        struct dmar_domain *domain, *tmp;
        struct intel_iommu *iommu;
        struct device_domain_info *info;
        u16 dma_alias;
        unsigned long flags;
        u8 bus, devfn;

        domain = find_domain(dev);
        if (domain)
                return domain;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return NULL;

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);

                pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);

                spin_lock_irqsave(&device_domain_lock, flags);
                info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
                                                      PCI_BUS_NUM(dma_alias),
                                                      dma_alias & 0xff);
                if (info) {
                        iommu = info->iommu;
                        domain = info->domain;
                }
                spin_unlock_irqrestore(&device_domain_lock, flags);

                /* DMA alias already has a domain, use it */
                if (info)
                        goto found_domain;
        }

        /* Allocate and initialize new domain for the device */
        domain = alloc_domain(0);
        if (!domain)
                return NULL;
        domain->id = iommu_attach_domain(domain, iommu);
        if (domain->id < 0) {
                free_domain_mem(domain);
                return NULL;
        }
        domain_attach_iommu(domain, iommu);
        if (domain_init(domain, gaw)) {
                domain_exit(domain);
                return NULL;
        }

        /* register PCI DMA alias device */
        if (dev_is_pci(dev)) {
                tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
                                           dma_alias & 0xff, NULL, domain);

                if (!tmp || tmp != domain) {
                        domain_exit(domain);
                        domain = tmp;
                }

                if (!domain)
                        return NULL;
        }

found_domain:
        tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);

        if (!tmp || tmp != domain) {
                domain_exit(domain);
                domain = tmp;
        }

        return domain;
}

static int iommu_identity_mapping;

#define IDENTMAP_ALL            1
#define IDENTMAP_GFX            2
#define IDENTMAP_AZALIA         4
2393 static int iommu_domain_identity_map(struct dmar_domain *domain,
2394 unsigned long long start,
2395 unsigned long long end)
2397 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2398 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2400 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2401 dma_to_mm_pfn(last_vpfn))) {
2402 pr_err("Reserving iova failed\n");
2406 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2407 start, end, domain->id);
2409 * RMRR range might overlap with the physical memory range,
2412 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2414 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2415 last_vpfn - first_vpfn + 1,
2416 DMA_PTE_READ|DMA_PTE_WRITE);
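/*
 * Illustration (hypothetical addresses, not from the original code):
 * identity-mapping an RMRR at [0x7f000000, 0x7f0fffff] reserves and
 * maps VT-d pfns 0x7f000-0x7f0ff 1:1 with read/write permission:
 *
 *	iommu_domain_identity_map(domain, 0x7f000000ULL, 0x7f0fffffULL);
 */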
2419 static int iommu_prepare_identity_map(struct device *dev,
2420 unsigned long long start,
2421 unsigned long long end)
2423 struct dmar_domain *domain;
2426 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2430 /* For _hardware_ passthrough, don't bother. But for software
2431 passthrough, we do it anyway -- it may indicate a memory
2432 range which is reserved in E820, and so didn't get set
2433 up to start with in si_domain */
2434 if (domain == si_domain && hw_pass_through) {
2435 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2436 dev_name(dev), start, end);
2440 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2441 dev_name(dev), start, end);
2444 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2445 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2446 dmi_get_system_info(DMI_BIOS_VENDOR),
2447 dmi_get_system_info(DMI_BIOS_VERSION),
2448 dmi_get_system_info(DMI_PRODUCT_VERSION));
2453 if (end >> agaw_to_width(domain->agaw)) {
2454 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2455 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2456 agaw_to_width(domain->agaw),
2457 dmi_get_system_info(DMI_BIOS_VENDOR),
2458 dmi_get_system_info(DMI_BIOS_VERSION),
2459 dmi_get_system_info(DMI_PRODUCT_VERSION));
2464 ret = iommu_domain_identity_map(domain, start, end);
2468 /* context entry init */
2469 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2476 domain_exit(domain);
2480 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2483 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2485 return iommu_prepare_identity_map(dev, rmrr->base_address,
2489 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2490 static inline void iommu_prepare_isa(void)
2492 struct pci_dev *pdev;
2495 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2499 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2500 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2503 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2508 static inline void iommu_prepare_isa(void)
2512 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2514 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2516 static int __init si_domain_init(int hw)
2518 struct dmar_drhd_unit *drhd;
2519 struct intel_iommu *iommu;
2523 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2527 for_each_active_iommu(iommu, drhd) {
2528 ret = iommu_attach_domain(si_domain, iommu);
2530 domain_exit(si_domain);
2533 si_domain->id = ret;
2535 } else if (si_domain->id != ret) {
2536 domain_exit(si_domain);
2539 domain_attach_iommu(si_domain, iommu);
2542 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2543 domain_exit(si_domain);
2547 pr_debug("Identity mapping domain is domain %d\n",
2553 for_each_online_node(nid) {
2554 unsigned long start_pfn, end_pfn;
2557 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2558 ret = iommu_domain_identity_map(si_domain,
2559 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2568 static int identity_mapping(struct device *dev)
2570 struct device_domain_info *info;
2572 if (likely(!iommu_identity_mapping))
2575 info = dev->archdata.iommu;
2576 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2577 return (info->domain == si_domain);
2582 static int domain_add_dev_info(struct dmar_domain *domain,
2583 struct device *dev, int translation)
2585 struct dmar_domain *ndomain;
2586 struct intel_iommu *iommu;
2590 iommu = device_to_iommu(dev, &bus, &devfn);
2594 ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2595 if (ndomain != domain)
2598 ret = domain_context_mapping(domain, dev, translation);
2600 domain_remove_one_dev_info(domain, dev);
2607 static bool device_has_rmrr(struct device *dev)
2609 struct dmar_rmrr_unit *rmrr;
2614 for_each_rmrr_units(rmrr) {
2616 * Return TRUE if this RMRR contains the device we are checking.
2619 for_each_active_dev_scope(rmrr->devices,
2620 rmrr->devices_cnt, i, tmp)
2631 * There are a couple of cases where we need to restrict the functionality of
2632 * devices associated with RMRRs. The first is when evaluating a device for
2633 * identity mapping because problems exist when devices are moved in and out
2634 * of domains and their respective RMRR information is lost. This means that
2635 * a device with associated RMRRs will never be in a "passthrough" domain.
2636 * The second is use of the device through the IOMMU API. This interface
2637 * expects to have full control of the IOVA space for the device. We cannot
2638 * satisfy both the requirement that RMRR access is maintained and have an
2639 * unencumbered IOVA space. We also have no ability to quiesce the device's
2640 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2641 * We therefore prevent devices associated with an RMRR from participating in
2642 * the IOMMU API, which eliminates them from device assignment.
2644 * In both cases we assume that PCI USB devices with RMRRs have them largely
2645 * for historical reasons and that the RMRR space is not actively used post
2646 * boot. This exclusion may change if vendors begin to abuse it.
2648 * The same exception is made for graphics devices, with the requirement that
2649 * any use of the RMRR regions will be torn down before assigning the device
2650 * to a guest.
2652 static bool device_is_rmrr_locked(struct device *dev)
2654 if (!device_has_rmrr(dev))
2657 if (dev_is_pci(dev)) {
2658 struct pci_dev *pdev = to_pci_dev(dev);
2660 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2667 static int iommu_should_identity_map(struct device *dev, int startup)
2670 if (dev_is_pci(dev)) {
2671 struct pci_dev *pdev = to_pci_dev(dev);
2673 if (device_is_rmrr_locked(dev))
2676 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2679 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2682 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2686 * We want to start off with all devices in the 1:1 domain, and
2687 * take them out later if we find they can't access all of memory.
2689 * However, we can't do this for PCI devices behind bridges,
2690 * because all PCI devices behind the same bridge will end up
2691 * with the same source-id on their transactions.
2693 * Practically speaking, we can't change things around for these
2694 * devices at run-time, because we can't be sure there'll be no
2695 * DMA transactions in flight for any of their siblings.
2697 * So PCI devices (unless they're on the root bus) as well as
2698 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2699 * the 1:1 domain, just in _case_ one of their siblings turns out
2700 * not to be able to map all of memory.
2702 if (!pci_is_pcie(pdev)) {
2703 if (!pci_is_root_bus(pdev->bus))
2705 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2707 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2710 if (device_has_rmrr(dev))
2715 * At boot time, we don't yet know if devices will be 64-bit capable.
2716 * Assume that they will; if they turn out not to be, then we can
2717 * take them out of the 1:1 domain later.
2721 * If the device's dma_mask is less than the system's memory
2722 * size then this is not a candidate for identity mapping.
2724 u64 dma_mask = *dev->dma_mask;
2726 if (dev->coherent_dma_mask &&
2727 dev->coherent_dma_mask < dma_mask)
2728 dma_mask = dev->coherent_dma_mask;
2730 return dma_mask >= dma_get_required_mask(dev);
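/*
 * Worked example (hypothetical sizes, added for clarity): on a system
 * with 8GiB of RAM, dma_get_required_mask() reports a 33-bit mask. A
 * device advertising only DMA_BIT_MASK(32) then fails the comparison
 * above and is kept out of (or later removed from) the 1:1 domain,
 * while a DMA_BIT_MASK(64) device passes and stays identity mapped.
 */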
2736 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2740 if (!iommu_should_identity_map(dev, 1))
2743 ret = domain_add_dev_info(si_domain, dev,
2744 hw ? CONTEXT_TT_PASS_THROUGH :
2745 CONTEXT_TT_MULTI_LEVEL);
2747 pr_info("%s identity mapping for device %s\n",
2748 hw ? "Hardware" : "Software", dev_name(dev));
2749 else if (ret == -ENODEV)
2750 /* device not associated with an iommu */
2757 static int __init iommu_prepare_static_identity_mapping(int hw)
2759 struct pci_dev *pdev = NULL;
2760 struct dmar_drhd_unit *drhd;
2761 struct intel_iommu *iommu;
2766 ret = si_domain_init(hw);
2770 for_each_pci_dev(pdev) {
2771 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2776 for_each_active_iommu(iommu, drhd)
2777 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2778 struct acpi_device_physical_node *pn;
2779 struct acpi_device *adev;
2781 if (dev->bus != &acpi_bus_type)
2784 adev = to_acpi_device(dev);
2785 mutex_lock(&adev->physical_node_lock);
2786 list_for_each_entry(pn, &adev->physical_node_list, node) {
2787 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2791 mutex_unlock(&adev->physical_node_lock);
2799 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2802 * Start from a sane iommu hardware state.
2803 * If queued invalidation was already initialized by us
2804 * (for example, while enabling interrupt remapping) then
2805 * things are already rolling from a sane state.
2809 * Clear any previous faults.
2811 dmar_fault(-1, iommu);
2813 * Disable queued invalidation if supported and already enabled
2814 * before OS handover.
2816 dmar_disable_qi(iommu);
2819 if (dmar_enable_qi(iommu)) {
2821 * Queued Invalidate not enabled, use Register Based Invalidate
2823 iommu->flush.flush_context = __iommu_flush_context;
2824 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2825 pr_info("%s: Using Register based invalidation\n",
2828 iommu->flush.flush_context = qi_flush_context;
2829 iommu->flush.flush_iotlb = qi_flush_iotlb;
2830 pr_info("%s: Using Queued invalidation\n", iommu->name);
2834 static int copy_context_table(struct intel_iommu *iommu,
2835 struct root_entry *old_re,
2836 struct context_entry **tbl,
2839 struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
2840 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2841 phys_addr_t old_ce_phys;
2843 tbl_idx = ext ? bus * 2 : bus;
2845 for (devfn = 0; devfn < 256; devfn++) {
2846 /* First calculate the correct index */
2847 idx = (ext ? devfn * 2 : devfn) % 256;
2850 /* First save what we may have and clean up */
2852 tbl[tbl_idx] = new_ce;
2853 __iommu_flush_cache(iommu, new_ce,
2863 old_ce_phys = root_entry_lctp(old_re);
2865 old_ce_phys = root_entry_uctp(old_re);
2868 if (ext && devfn == 0) {
2869 /* No LCTP, try UCTP */
2878 old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
2882 new_ce = alloc_pgtable_page(iommu->node);
2889 /* Now copy the context entry */
2892 if (!__context_present(&ce))
2895 did = context_domain_id(&ce);
2896 if (did >= 0 && did < cap_ndoms(iommu->cap))
2897 set_bit(did, iommu->domain_ids);
2900 * We need a marker for copied context entries. This
2901 * marker needs to work for the old format as well as
2902 * for extended context entries.
2904 * Bit 67 of the context entry is used. In the old
2905 * format this bit is available to software, in the
2906 * extended format it is the PGE bit, but PGE is ignored
2907 * by HW if PASIDs are disabled (and thus still available).
2910 * So disable PASIDs first and then mark the entry
2911 * copied. This means that we don't copy PASID
2912 * translations from the old kernel, but this is fine as
2913 * faults there are not fatal.
2915 context_clear_pasid_enable(&ce);
2916 context_set_copied(&ce);
2921 tbl[tbl_idx + pos] = new_ce;
2923 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
2932 static int copy_translation_tables(struct intel_iommu *iommu)
2934 struct context_entry **ctxt_tbls;
2935 struct root_entry *old_rt;
2936 phys_addr_t old_rt_phys;
2937 int ctxt_table_entries;
2938 unsigned long flags;
2943 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
2944 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
2946 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
2950 old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
2954 /* This is too big for the stack - allocate it from slab */
2955 ctxt_table_entries = ext ? 512 : 256;
2957 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
2961 for (bus = 0; bus < 256; bus++) {
2962 ret = copy_context_table(iommu, &old_rt[bus],
2963 ctxt_tbls, bus, ext);
2965 pr_err("%s: Failed to copy context table for bus %d\n",
2971 spin_lock_irqsave(&iommu->lock, flags);
2973 /* Context tables are copied, now write them to the root_entry table */
2974 for (bus = 0; bus < 256; bus++) {
2975 int idx = ext ? bus * 2 : bus;
2978 if (ctxt_tbls[idx]) {
2979 val = virt_to_phys(ctxt_tbls[idx]) | 1;
2980 iommu->root_entry[bus].lo = val;
2983 if (!ext || !ctxt_tbls[idx + 1])
2986 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
2987 iommu->root_entry[bus].hi = val;
2990 spin_unlock_irqrestore(&iommu->lock, flags);
2994 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
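/*
 * Layout reminder (editorial, best-effort reading of the code above):
 * with extended context entries (ext) each bus still owns one root
 * entry, but the entry carries two context-table pointers -- lo for
 * devfn 0x00-0x7f and hi for devfn 0x80-0xff -- which is why the copy
 * uses "bus * 2" table slots and writes both .lo and .hi here.
 */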
3004 static int __init init_dmars(void)
3006 struct dmar_drhd_unit *drhd;
3007 struct dmar_rmrr_unit *rmrr;
3009 struct intel_iommu *iommu;
3015 * initialize and program root entry to not present
3018 for_each_drhd_unit(drhd) {
3020 * lock not needed as this is only incremented in the single
3021 * threaded kernel __init code path; all other accesses are read-only
3024 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3028 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3031 /* Preallocate enough resources for IOMMU hot-addition */
3032 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3033 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3035 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3038 pr_err("Allocating global iommu array failed\n");
3043 deferred_flush = kzalloc(g_num_of_iommus *
3044 sizeof(struct deferred_flush_tables), GFP_KERNEL);
3045 if (!deferred_flush) {
3050 for_each_active_iommu(iommu, drhd) {
3051 g_iommus[iommu->seq_id] = iommu;
3053 intel_iommu_init_qi(iommu);
3055 ret = iommu_init_domains(iommu);
3059 init_translation_status(iommu);
3061 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3062 iommu_disable_translation(iommu);
3063 clear_translation_pre_enabled(iommu);
3064 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3070 * we could share the same root & context tables
3071 * among all IOMMUs; need to split this later.
3073 ret = iommu_alloc_root_entry(iommu);
3077 if (translation_pre_enabled(iommu)) {
3078 pr_info("Translation already enabled - trying to copy translation structures\n");
3080 ret = copy_translation_tables(iommu);
3083 * We found the IOMMU with translation
3084 * enabled - but failed to copy over the
3085 * old root-entry table. Try to proceed
3086 * by disabling translation now and
3087 * allocating a clean root-entry table.
3088 * This might cause DMAR faults, but
3089 * probably the dump will still succeed.
3091 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3093 iommu_disable_translation(iommu);
3094 clear_translation_pre_enabled(iommu);
3096 pr_info("Copied translation tables from previous kernel for %s\n",
3101 iommu_flush_write_buffer(iommu);
3102 iommu_set_root_entry(iommu);
3103 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3104 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3106 if (!ecap_pass_through(iommu->ecap))
3107 hw_pass_through = 0;
3110 if (iommu_pass_through)
3111 iommu_identity_mapping |= IDENTMAP_ALL;
3113 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3114 iommu_identity_mapping |= IDENTMAP_GFX;
3117 check_tylersburg_isoch();
3120 * If pass through is not set or not enabled, set up context entries for
3121 * identity mappings for rmrr, gfx, and isa, possibly falling back to static
3122 * identity mapping if iommu_identity_mapping is set.
3124 if (iommu_identity_mapping) {
3125 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3127 pr_crit("Failed to setup IOMMU pass-through\n");
3133 * for each dev attached to rmrr
3135 * locate drhd for dev, alloc domain for dev
3136 * allocate free domain
3137 * allocate page table entries for rmrr
3138 * if context not allocated for bus
3139 * allocate and init context
3140 * set present in root table for this bus
3141 * init context with domain, translation etc
3145 pr_info("Setting RMRR:\n");
3146 for_each_rmrr_units(rmrr) {
3147 /* some BIOSes list non-existent devices in the DMAR table. */
3148 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3150 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3152 pr_err("Mapping reserved region failed\n");
3156 iommu_prepare_isa();
3161 * global invalidate context cache
3162 * global invalidate iotlb
3163 * enable translation
3165 for_each_iommu(iommu, drhd) {
3166 if (drhd->ignored) {
3168 * we always have to disable PMRs or DMA may fail on this device
3172 iommu_disable_protect_mem_regions(iommu);
3176 iommu_flush_write_buffer(iommu);
3178 ret = dmar_set_interrupt(iommu);
3182 iommu_enable_translation(iommu);
3183 iommu_disable_protect_mem_regions(iommu);
3189 for_each_active_iommu(iommu, drhd) {
3190 disable_dmar_iommu(iommu);
3191 free_dmar_iommu(iommu);
3193 kfree(deferred_flush);
3200 /* This takes a number of _MM_ pages, not VTD pages */
3201 static struct iova *intel_alloc_iova(struct device *dev,
3202 struct dmar_domain *domain,
3203 unsigned long nrpages, uint64_t dma_mask)
3205 struct iova *iova = NULL;
3207 /* Restrict dma_mask to the width that the iommu can handle */
3208 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3210 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3212 * First try to allocate an io virtual address in
3213 * DMA_BIT_MASK(32) and if that fails then try allocating
3216 iova = alloc_iova(&domain->iovad, nrpages,
3217 IOVA_PFN(DMA_BIT_MASK(32)), 1);
3221 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
3222 if (unlikely(!iova)) {
3223 pr_err("Allocating %ld-page iova for %s failed",
3224 nrpages, dev_name(dev));
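/*
 * Example (illustrative values): for a 64-bit capable device the
 * allocator first tries the space below 4GiB, avoiding PCI
 * dual-address-cycle overhead where possible; booting with
 * intel_iommu=forcedac skips that first attempt. A device whose mask
 * is DMA_BIT_MASK(32) allocates directly within its mask:
 *
 *	iova = intel_alloc_iova(dev, domain, 16, DMA_BIT_MASK(32));
 */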
3231 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3233 struct dmar_domain *domain;
3236 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3238 pr_err("Allocating domain for %s failed\n",
3243 /* make sure context mapping is ok */
3244 if (unlikely(!domain_context_mapped(dev))) {
3245 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
3247 pr_err("Domain context map for %s failed\n",
3256 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3258 struct device_domain_info *info;
3260 /* No lock here, assumes no domain exit in normal case */
3261 info = dev->archdata.iommu;
3263 return info->domain;
3265 return __get_valid_domain_for_dev(dev);
3268 /* Check if the dev needs to go through non-identity map and unmap process. */
3269 static int iommu_no_mapping(struct device *dev)
3273 if (iommu_dummy(dev))
3276 if (!iommu_identity_mapping)
3279 found = identity_mapping(dev);
3281 if (iommu_should_identity_map(dev, 0))
3285 * 32 bit DMA is removed from si_domain and falls back
3286 * to non-identity mapping.
3288 domain_remove_one_dev_info(si_domain, dev);
3289 pr_info("32bit %s uses non-identity mapping\n",
3295 * In case a 64 bit DMA device is detached from a vm, the device
3296 * is put back into si_domain for identity mapping.
3298 if (iommu_should_identity_map(dev, 0)) {
3300 ret = domain_add_dev_info(si_domain, dev,
3302 CONTEXT_TT_PASS_THROUGH :
3303 CONTEXT_TT_MULTI_LEVEL);
3305 pr_info("64bit %s uses identity mapping\n",
3315 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3316 size_t size, int dir, u64 dma_mask)
3318 struct dmar_domain *domain;
3319 phys_addr_t start_paddr;
3323 struct intel_iommu *iommu;
3324 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3326 BUG_ON(dir == DMA_NONE);
3328 if (iommu_no_mapping(dev))
3331 domain = get_valid_domain_for_dev(dev);
3335 iommu = domain_get_iommu(domain);
3336 size = aligned_nrpages(paddr, size);
3338 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3343 * Check if DMAR supports zero-length reads on write-only mappings.
3346 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3347 !cap_zlr(iommu->cap))
3348 prot |= DMA_PTE_READ;
3349 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3350 prot |= DMA_PTE_WRITE;
3352 * paddr to paddr + size might cover only part of a page; we should map the
3353 * whole page. Note: if two parts of one page are separately mapped, we
3354 * might have two guest addresses mapping to the same host paddr, but this
3355 * is not a big problem
3357 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3358 mm_to_dma_pfn(paddr_pfn), size, prot);
3362 /* it's a non-present to present mapping. Only flush if caching mode */
3363 if (cap_caching_mode(iommu->cap))
3364 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
3366 iommu_flush_write_buffer(iommu);
3368 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3369 start_paddr += paddr & ~PAGE_MASK;
3374 __free_iova(&domain->iovad, iova);
3375 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3376 dev_name(dev), size, (unsigned long long)paddr, dir);
3380 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3381 unsigned long offset, size_t size,
3382 enum dma_data_direction dir,
3383 struct dma_attrs *attrs)
3385 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3386 dir, *dev->dma_mask);
3389 static void flush_unmaps(void)
3395 /* just flush them all */
3396 for (i = 0; i < g_num_of_iommus; i++) {
3397 struct intel_iommu *iommu = g_iommus[i];
3401 if (!deferred_flush[i].next)
3404 /* In caching mode, global flushes make emulation expensive */
3405 if (!cap_caching_mode(iommu->cap))
3406 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3407 DMA_TLB_GLOBAL_FLUSH);
3408 for (j = 0; j < deferred_flush[i].next; j++) {
3410 struct iova *iova = deferred_flush[i].iova[j];
3411 struct dmar_domain *domain = deferred_flush[i].domain[j];
3413 /* On real hardware multiple invalidations are expensive */
3414 if (cap_caching_mode(iommu->cap))
3415 iommu_flush_iotlb_psi(iommu, domain->id,
3416 iova->pfn_lo, iova_size(iova),
3417 !deferred_flush[i].freelist[j], 0);
3419 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3420 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3421 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3423 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3424 if (deferred_flush[i].freelist[j])
3425 dma_free_pagelist(deferred_flush[i].freelist[j]);
3427 deferred_flush[i].next = 0;
3433 static void flush_unmaps_timeout(unsigned long data)
3435 unsigned long flags;
3437 spin_lock_irqsave(&async_umap_flush_lock, flags);
3439 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3442 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3444 unsigned long flags;
3446 struct intel_iommu *iommu;
3448 spin_lock_irqsave(&async_umap_flush_lock, flags);
3449 if (list_size == HIGH_WATER_MARK)
3452 iommu = domain_get_iommu(dom);
3453 iommu_id = iommu->seq_id;
3455 next = deferred_flush[iommu_id].next;
3456 deferred_flush[iommu_id].domain[next] = dom;
3457 deferred_flush[iommu_id].iova[next] = iova;
3458 deferred_flush[iommu_id].freelist[next] = freelist;
3459 deferred_flush[iommu_id].next++;
3462 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3466 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3469 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3471 struct dmar_domain *domain;
3472 unsigned long start_pfn, last_pfn;
3474 struct intel_iommu *iommu;
3475 struct page *freelist;
3477 if (iommu_no_mapping(dev))
3480 domain = find_domain(dev);
3483 iommu = domain_get_iommu(domain);
3485 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3486 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3487 (unsigned long long)dev_addr))
3490 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3491 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3493 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3494 dev_name(dev), start_pfn, last_pfn);
3496 freelist = domain_unmap(domain, start_pfn, last_pfn);
3498 if (intel_iommu_strict) {
3499 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3500 last_pfn - start_pfn + 1, !freelist, 0);
3502 __free_iova(&domain->iovad, iova);
3503 dma_free_pagelist(freelist);
3505 add_unmap(domain, iova, freelist);
3507 * queue up the release of the unmap to save the roughly 1/6th of
3508 * the cpu time used up by the iotlb flush operation...
3513 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3514 size_t size, enum dma_data_direction dir,
3515 struct dma_attrs *attrs)
3517 intel_unmap(dev, dev_addr);
3520 static void *intel_alloc_coherent(struct device *dev, size_t size,
3521 dma_addr_t *dma_handle, gfp_t flags,
3522 struct dma_attrs *attrs)
3524 struct page *page = NULL;
3527 size = PAGE_ALIGN(size);
3528 order = get_order(size);
3530 if (!iommu_no_mapping(dev))
3531 flags &= ~(GFP_DMA | GFP_DMA32);
3532 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3533 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3539 if (flags & __GFP_WAIT) {
3540 unsigned int count = size >> PAGE_SHIFT;
3542 page = dma_alloc_from_contiguous(dev, count, order);
3543 if (page && iommu_no_mapping(dev) &&
3544 page_to_phys(page) + size > dev->coherent_dma_mask) {
3545 dma_release_from_contiguous(dev, page, count);
3551 page = alloc_pages(flags, order);
3554 memset(page_address(page), 0, size);
3556 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3558 dev->coherent_dma_mask);
3560 return page_address(page);
3561 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3562 __free_pages(page, order);
3567 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3568 dma_addr_t dma_handle, struct dma_attrs *attrs)
3571 struct page *page = virt_to_page(vaddr);
3573 size = PAGE_ALIGN(size);
3574 order = get_order(size);
3576 intel_unmap(dev, dma_handle);
3577 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3578 __free_pages(page, order);
3581 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3582 int nelems, enum dma_data_direction dir,
3583 struct dma_attrs *attrs)
3585 intel_unmap(dev, sglist[0].dma_address);
3588 static int intel_nontranslate_map_sg(struct device *hddev,
3589 struct scatterlist *sglist, int nelems, int dir)
3592 struct scatterlist *sg;
3594 for_each_sg(sglist, sg, nelems, i) {
3595 BUG_ON(!sg_page(sg));
3596 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3597 sg->dma_length = sg->length;
3602 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3603 enum dma_data_direction dir, struct dma_attrs *attrs)
3606 struct dmar_domain *domain;
3609 struct iova *iova = NULL;
3611 struct scatterlist *sg;
3612 unsigned long start_vpfn;
3613 struct intel_iommu *iommu;
3615 BUG_ON(dir == DMA_NONE);
3616 if (iommu_no_mapping(dev))
3617 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3619 domain = get_valid_domain_for_dev(dev);
3623 iommu = domain_get_iommu(domain);
3625 for_each_sg(sglist, sg, nelems, i)
3626 size += aligned_nrpages(sg->offset, sg->length);
3628 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3631 sglist->dma_length = 0;
3636 * Check if DMAR supports zero-length reads on write-only mappings.
3639 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3640 !cap_zlr(iommu->cap))
3641 prot |= DMA_PTE_READ;
3642 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3643 prot |= DMA_PTE_WRITE;
3645 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3647 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3648 if (unlikely(ret)) {
3649 dma_pte_free_pagetable(domain, start_vpfn,
3650 start_vpfn + size - 1);
3651 __free_iova(&domain->iovad, iova);
3655 /* it's a non-present to present mapping. Only flush if caching mode */
3656 if (cap_caching_mode(iommu->cap))
3657 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
3659 iommu_flush_write_buffer(iommu);
3664 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3669 struct dma_map_ops intel_dma_ops = {
3670 .alloc = intel_alloc_coherent,
3671 .free = intel_free_coherent,
3672 .map_sg = intel_map_sg,
3673 .unmap_sg = intel_unmap_sg,
3674 .map_page = intel_map_page,
3675 .unmap_page = intel_unmap_page,
3676 .mapping_error = intel_mapping_error,
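/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * once dma_ops points at intel_dma_ops, an ordinary streaming mapping
 * is transparently routed through __intel_map_single():
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */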
3679 static inline int iommu_domain_cache_init(void)
3683 iommu_domain_cache = kmem_cache_create("iommu_domain",
3684 sizeof(struct dmar_domain),
3689 if (!iommu_domain_cache) {
3690 pr_err("Couldn't create iommu_domain cache\n");
3697 static inline int iommu_devinfo_cache_init(void)
3701 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3702 sizeof(struct device_domain_info),
3706 if (!iommu_devinfo_cache) {
3707 pr_err("Couldn't create devinfo cache\n");
3714 static int __init iommu_init_mempool(void)
3717 ret = iommu_iova_cache_init();
3721 ret = iommu_domain_cache_init();
3725 ret = iommu_devinfo_cache_init();
3729 kmem_cache_destroy(iommu_domain_cache);
3731 iommu_iova_cache_destroy();
3736 static void __init iommu_exit_mempool(void)
3738 kmem_cache_destroy(iommu_devinfo_cache);
3739 kmem_cache_destroy(iommu_domain_cache);
3740 iommu_iova_cache_destroy();
3743 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3745 struct dmar_drhd_unit *drhd;
3749 /* We know that this device on this chipset has its own IOMMU.
3750 * If we find it under a different IOMMU, then the BIOS is lying
3751 * to us. Hope that the IOMMU for this device is actually
3752 * disabled, and it needs no translation...
3754 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3756 /* "can't" happen */
3757 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3760 vtbar &= 0xffff0000;
3762 /* we know that this iommu should be at offset 0xa000 from vtbar */
3763 drhd = dmar_find_matched_drhd_unit(pdev);
3764 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3765 TAINT_FIRMWARE_WORKAROUND,
3766 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3767 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3769 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3771 static void __init init_no_remapping_devices(void)
3773 struct dmar_drhd_unit *drhd;
3777 for_each_drhd_unit(drhd) {
3778 if (!drhd->include_all) {
3779 for_each_active_dev_scope(drhd->devices,
3780 drhd->devices_cnt, i, dev)
3782 /* ignore DMAR unit if no devices exist */
3783 if (i == drhd->devices_cnt)
3788 for_each_active_drhd_unit(drhd) {
3789 if (drhd->include_all)
3792 for_each_active_dev_scope(drhd->devices,
3793 drhd->devices_cnt, i, dev)
3794 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3796 if (i < drhd->devices_cnt)
3799 /* This IOMMU has *only* gfx devices. Either bypass it or
3800 set the gfx_mapped flag, as appropriate */
3802 intel_iommu_gfx_mapped = 1;
3805 for_each_active_dev_scope(drhd->devices,
3806 drhd->devices_cnt, i, dev)
3807 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3812 #ifdef CONFIG_SUSPEND
3813 static int init_iommu_hw(void)
3815 struct dmar_drhd_unit *drhd;
3816 struct intel_iommu *iommu = NULL;
3818 for_each_active_iommu(iommu, drhd)
3820 dmar_reenable_qi(iommu);
3822 for_each_iommu(iommu, drhd) {
3823 if (drhd->ignored) {
3825 * we always have to disable PMRs or DMA may fail on this device
3829 iommu_disable_protect_mem_regions(iommu);
3833 iommu_flush_write_buffer(iommu);
3835 iommu_set_root_entry(iommu);
3837 iommu->flush.flush_context(iommu, 0, 0, 0,
3838 DMA_CCMD_GLOBAL_INVL);
3839 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3840 iommu_enable_translation(iommu);
3841 iommu_disable_protect_mem_regions(iommu);
3847 static void iommu_flush_all(void)
3849 struct dmar_drhd_unit *drhd;
3850 struct intel_iommu *iommu;
3852 for_each_active_iommu(iommu, drhd) {
3853 iommu->flush.flush_context(iommu, 0, 0, 0,
3854 DMA_CCMD_GLOBAL_INVL);
3855 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3856 DMA_TLB_GLOBAL_FLUSH);
3860 static int iommu_suspend(void)
3862 struct dmar_drhd_unit *drhd;
3863 struct intel_iommu *iommu = NULL;
3866 for_each_active_iommu(iommu, drhd) {
3867 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3869 if (!iommu->iommu_state)
3875 for_each_active_iommu(iommu, drhd) {
3876 iommu_disable_translation(iommu);
3878 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3880 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3881 readl(iommu->reg + DMAR_FECTL_REG);
3882 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3883 readl(iommu->reg + DMAR_FEDATA_REG);
3884 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3885 readl(iommu->reg + DMAR_FEADDR_REG);
3886 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3887 readl(iommu->reg + DMAR_FEUADDR_REG);
3889 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3894 for_each_active_iommu(iommu, drhd)
3895 kfree(iommu->iommu_state);
3900 static void iommu_resume(void)
3902 struct dmar_drhd_unit *drhd;
3903 struct intel_iommu *iommu = NULL;
3906 if (init_iommu_hw()) {
3908 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3910 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3914 for_each_active_iommu(iommu, drhd) {
3916 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3918 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3919 iommu->reg + DMAR_FECTL_REG);
3920 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3921 iommu->reg + DMAR_FEDATA_REG);
3922 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3923 iommu->reg + DMAR_FEADDR_REG);
3924 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3925 iommu->reg + DMAR_FEUADDR_REG);
3927 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3930 for_each_active_iommu(iommu, drhd)
3931 kfree(iommu->iommu_state);
3934 static struct syscore_ops iommu_syscore_ops = {
3935 .resume = iommu_resume,
3936 .suspend = iommu_suspend,
3939 static void __init init_iommu_pm_ops(void)
3941 register_syscore_ops(&iommu_syscore_ops);
3945 static inline void init_iommu_pm_ops(void) {}
3946 #endif /* CONFIG_PM */
3949 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3951 struct acpi_dmar_reserved_memory *rmrr;
3952 struct dmar_rmrr_unit *rmrru;
3954 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3958 rmrru->hdr = header;
3959 rmrr = (struct acpi_dmar_reserved_memory *)header;
3960 rmrru->base_address = rmrr->base_address;
3961 rmrru->end_address = rmrr->end_address;
3962 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3963 ((void *)rmrr) + rmrr->header.length,
3964 &rmrru->devices_cnt);
3965 if (rmrru->devices_cnt && rmrru->devices == NULL) {
3970 list_add(&rmrru->list, &dmar_rmrr_units);
3975 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3977 struct dmar_atsr_unit *atsru;
3978 struct acpi_dmar_atsr *tmp;
3980 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3981 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3982 if (atsr->segment != tmp->segment)
3984 if (atsr->header.length != tmp->header.length)
3986 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3993 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3995 struct acpi_dmar_atsr *atsr;
3996 struct dmar_atsr_unit *atsru;
3998 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4001 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4002 atsru = dmar_find_atsr(atsr);
4006 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4011 * If memory is allocated from slab by the ACPI _DSM method, we need to
4012 * copy the memory content because the memory buffer will be freed on exit.
4015 atsru->hdr = (void *)(atsru + 1);
4016 memcpy(atsru->hdr, hdr, hdr->length);
4017 atsru->include_all = atsr->flags & 0x1;
4018 if (!atsru->include_all) {
4019 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4020 (void *)atsr + atsr->header.length,
4021 &atsru->devices_cnt);
4022 if (atsru->devices_cnt && atsru->devices == NULL) {
4028 list_add_rcu(&atsru->list, &dmar_atsr_units);
4033 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4035 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4039 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4041 struct acpi_dmar_atsr *atsr;
4042 struct dmar_atsr_unit *atsru;
4044 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4045 atsru = dmar_find_atsr(atsr);
4047 list_del_rcu(&atsru->list);
4049 intel_iommu_free_atsr(atsru);
4055 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4059 struct acpi_dmar_atsr *atsr;
4060 struct dmar_atsr_unit *atsru;
4062 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4063 atsru = dmar_find_atsr(atsr);
4067 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
4068 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4075 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4078 struct intel_iommu *iommu = dmaru->iommu;
4080 if (g_iommus[iommu->seq_id])
4083 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4084 pr_warn("%s: Doesn't support hardware pass through.\n",
4088 if (!ecap_sc_support(iommu->ecap) &&
4089 domain_update_iommu_snooping(iommu)) {
4090 pr_warn("%s: Doesn't support snooping.\n",
4094 sp = domain_update_iommu_superpage(iommu) - 1;
4095 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4096 pr_warn("%s: Doesn't support large page.\n",
4102 * Disable translation if already enabled prior to OS handover.
4104 if (iommu->gcmd & DMA_GCMD_TE)
4105 iommu_disable_translation(iommu);
4107 g_iommus[iommu->seq_id] = iommu;
4108 ret = iommu_init_domains(iommu);
4110 ret = iommu_alloc_root_entry(iommu);
4114 if (dmaru->ignored) {
4116 * we always have to disable PMRs or DMA may fail on this device
4119 iommu_disable_protect_mem_regions(iommu);
4123 intel_iommu_init_qi(iommu);
4124 iommu_flush_write_buffer(iommu);
4125 ret = dmar_set_interrupt(iommu);
4129 iommu_set_root_entry(iommu);
4130 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4131 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4132 iommu_enable_translation(iommu);
4135 ret = iommu_attach_domain(si_domain, iommu);
4136 if (ret < 0 || si_domain->id != ret)
4138 domain_attach_iommu(si_domain, iommu);
4141 iommu_disable_protect_mem_regions(iommu);
4145 disable_dmar_iommu(iommu);
4147 free_dmar_iommu(iommu);
4151 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4154 struct intel_iommu *iommu = dmaru->iommu;
4156 if (!intel_iommu_enabled)
4162 ret = intel_iommu_add(dmaru);
4164 disable_dmar_iommu(iommu);
4165 free_dmar_iommu(iommu);
4171 static void intel_iommu_free_dmars(void)
4173 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4174 struct dmar_atsr_unit *atsru, *atsr_n;
4176 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4177 list_del(&rmrru->list);
4178 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4182 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4183 list_del(&atsru->list);
4184 intel_iommu_free_atsr(atsru);
4188 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4191 struct pci_bus *bus;
4192 struct pci_dev *bridge = NULL;
4194 struct acpi_dmar_atsr *atsr;
4195 struct dmar_atsr_unit *atsru;
4197 dev = pci_physfn(dev);
4198 for (bus = dev->bus; bus; bus = bus->parent) {
4200 if (!bridge || !pci_is_pcie(bridge) ||
4201 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4203 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4210 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4211 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4212 if (atsr->segment != pci_domain_nr(dev->bus))
4215 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4216 if (tmp == &bridge->dev)
4219 if (atsru->include_all)
4229 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4232 struct dmar_rmrr_unit *rmrru;
4233 struct dmar_atsr_unit *atsru;
4234 struct acpi_dmar_atsr *atsr;
4235 struct acpi_dmar_reserved_memory *rmrr;
4237 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4240 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4241 rmrr = container_of(rmrru->hdr,
4242 struct acpi_dmar_reserved_memory, header);
4243 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4244 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4245 ((void *)rmrr) + rmrr->header.length,
4246 rmrr->segment, rmrru->devices,
4247 rmrru->devices_cnt);
4250 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4251 dmar_remove_dev_scope(info, rmrr->segment,
4252 rmrru->devices, rmrru->devices_cnt);
4256 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4257 if (atsru->include_all)
4260 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4261 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4262 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4263 (void *)atsr + atsr->header.length,
4264 atsr->segment, atsru->devices,
4265 atsru->devices_cnt);
4270 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
4271 if (dmar_remove_dev_scope(info, atsr->segment,
4272 atsru->devices, atsru->devices_cnt))
4281 * Here we only respond to the action of a device being unbound from its
4283 * driver. A newly added device is not attached to its DMAR domain here
4284 * yet; that will happen when the device is first mapped to an iova.
4286 static int device_notifier(struct notifier_block *nb,
4287 unsigned long action, void *data)
4289 struct device *dev = data;
4290 struct dmar_domain *domain;
4292 if (iommu_dummy(dev))
4295 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4298 domain = find_domain(dev);
4302 down_read(&dmar_global_lock);
4303 domain_remove_one_dev_info(domain, dev);
4304 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4305 domain_exit(domain);
4306 up_read(&dmar_global_lock);
4311 static struct notifier_block device_nb = {
4312 .notifier_call = device_notifier,
4315 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4316 unsigned long val, void *v)
4318 struct memory_notify *mhp = v;
4319 unsigned long long start, end;
4320 unsigned long start_vpfn, last_vpfn;
4323 case MEM_GOING_ONLINE:
4324 start = mhp->start_pfn << PAGE_SHIFT;
4325 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4326 if (iommu_domain_identity_map(si_domain, start, end)) {
4327 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4334 case MEM_CANCEL_ONLINE:
4335 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4336 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4337 while (start_vpfn <= last_vpfn) {
4339 struct dmar_drhd_unit *drhd;
4340 struct intel_iommu *iommu;
4341 struct page *freelist;
4343 iova = find_iova(&si_domain->iovad, start_vpfn);
4345 pr_debug("Failed get IOVA for PFN %lx\n",
4350 iova = split_and_remove_iova(&si_domain->iovad, iova,
4351 start_vpfn, last_vpfn);
4353 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4354 start_vpfn, last_vpfn);
4358 freelist = domain_unmap(si_domain, iova->pfn_lo,
4362 for_each_active_iommu(iommu, drhd)
4363 iommu_flush_iotlb_psi(iommu, si_domain->id,
4364 iova->pfn_lo, iova_size(iova),
4367 dma_free_pagelist(freelist);
4369 start_vpfn = iova->pfn_hi + 1;
4370 free_iova_mem(iova);
4378 static struct notifier_block intel_iommu_memory_nb = {
4379 .notifier_call = intel_iommu_memory_notifier,
4384 static ssize_t intel_iommu_show_version(struct device *dev,
4385 struct device_attribute *attr,
4388 struct intel_iommu *iommu = dev_get_drvdata(dev);
4389 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4390 return sprintf(buf, "%d:%d\n",
4391 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4393 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4395 static ssize_t intel_iommu_show_address(struct device *dev,
4396 struct device_attribute *attr,
4399 struct intel_iommu *iommu = dev_get_drvdata(dev);
4400 return sprintf(buf, "%llx\n", iommu->reg_phys);
4402 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4404 static ssize_t intel_iommu_show_cap(struct device *dev,
4405 struct device_attribute *attr,
4408 struct intel_iommu *iommu = dev_get_drvdata(dev);
4409 return sprintf(buf, "%llx\n", iommu->cap);
4411 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4413 static ssize_t intel_iommu_show_ecap(struct device *dev,
4414 struct device_attribute *attr,
4417 struct intel_iommu *iommu = dev_get_drvdata(dev);
4418 return sprintf(buf, "%llx\n", iommu->ecap);
4420 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
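/*
 * Editorial note: these read-only attributes end up grouped under the
 * "intel-iommu" sysfs directory of each unit's iommu class device, so
 * (path illustrative) reading the raw capability registers from
 * userspace looks like /sys/class/iommu/dmar0/intel-iommu/cap.
 */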
4422 static struct attribute *intel_iommu_attrs[] = {
4423 &dev_attr_version.attr,
4424 &dev_attr_address.attr,
4426 &dev_attr_ecap.attr,
4430 static struct attribute_group intel_iommu_group = {
4431 .name = "intel-iommu",
4432 .attrs = intel_iommu_attrs,
4435 const struct attribute_group *intel_iommu_groups[] = {
4440 int __init intel_iommu_init(void)
4443 struct dmar_drhd_unit *drhd;
4444 struct intel_iommu *iommu;
4446 /* VT-d is required for a TXT/tboot launch, so enforce that */
4447 force_on = tboot_force_iommu();
4449 if (iommu_init_mempool()) {
4451 panic("tboot: Failed to initialize iommu memory\n");
4455 down_write(&dmar_global_lock);
4456 if (dmar_table_init()) {
4458 panic("tboot: Failed to initialize DMAR table\n");
4463 * Disable translation if already enabled prior to OS handover.
4465 for_each_active_iommu(iommu, drhd)
4466 if (iommu->gcmd & DMA_GCMD_TE)
4467 iommu_disable_translation(iommu);
4469 if (dmar_dev_scope_init() < 0) {
4471 panic("tboot: Failed to initialize DMAR device scope\n");
4475 if (no_iommu || dmar_disabled)
4478 if (list_empty(&dmar_rmrr_units))
4479 pr_info("No RMRR found\n");
4481 if (list_empty(&dmar_atsr_units))
4482 pr_info("No ATSR found\n");
4484 if (dmar_init_reserved_ranges()) {
4486 panic("tboot: Failed to reserve iommu ranges\n");
4487 goto out_free_reserved_range;
4490 init_no_remapping_devices();
4495 panic("tboot: Failed to initialize DMARs\n");
4496 pr_err("Initialization failed\n");
4497 goto out_free_reserved_range;
4499 up_write(&dmar_global_lock);
4500 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4502 init_timer(&unmap_timer);
4503 #ifdef CONFIG_SWIOTLB
4506 dma_ops = &intel_dma_ops;
4508 init_iommu_pm_ops();
4510 for_each_active_iommu(iommu, drhd)
4511 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4515 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4516 bus_register_notifier(&pci_bus_type, &device_nb);
4517 if (si_domain && !hw_pass_through)
4518 register_memory_notifier(&intel_iommu_memory_nb);
4520 intel_iommu_enabled = 1;
4524 out_free_reserved_range:
4525 put_iova_domain(&reserved_iova_list);
4527 intel_iommu_free_dmars();
4528 up_write(&dmar_global_lock);
4529 iommu_exit_mempool();
4533 static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4535 struct intel_iommu *iommu = opaque;
4537 iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4542 * NB - intel-iommu lacks any sort of reference counting for the users of
4543 * dependent devices. If multiple endpoints have intersecting dependent
4544 * devices, unbinding the driver from any one of them will possibly leave
4545 * the others unable to operate.
4547 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4550 if (!iommu || !dev || !dev_is_pci(dev))
4553 pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
4556 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4559 struct device_domain_info *info, *tmp;
4560 struct intel_iommu *iommu;
4561 unsigned long flags;
4565 iommu = device_to_iommu(dev, &bus, &devfn);
4569 spin_lock_irqsave(&device_domain_lock, flags);
4570 list_for_each_entry_safe(info, tmp, &domain->devices, link) {
4571 if (info->iommu == iommu && info->bus == bus &&
4572 info->devfn == devfn) {
4573 unlink_domain_info(info);
4574 spin_unlock_irqrestore(&device_domain_lock, flags);
4576 iommu_disable_dev_iotlb(info);
4577 iommu_detach_dev(iommu, info->bus, info->devfn);
4578 iommu_detach_dependent_devices(iommu, dev);
4579 free_devinfo_mem(info);
4581 spin_lock_irqsave(&device_domain_lock, flags);
4589 /* if there are no other devices under the same iommu
4590 * owned by this domain, clear this iommu from iommu_bmp and
4591 * update the iommu count and coherency
4593 if (info->iommu == iommu)
4597 spin_unlock_irqrestore(&device_domain_lock, flags);
4600 domain_detach_iommu(domain, iommu);
4601 if (!domain_type_is_vm_or_si(domain))
4602 iommu_detach_domain(domain, iommu);
4606 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4610 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4612 domain_reserve_special_ranges(domain);
4614 /* calculate AGAW */
4615 domain->gaw = guest_width;
4616 adjust_width = guestwidth_to_adjustwidth(guest_width);
4617 domain->agaw = width_to_agaw(adjust_width);
4619 domain->iommu_coherency = 0;
4620 domain->iommu_snooping = 0;
4621 domain->iommu_superpage = 0;
4622 domain->max_addr = 0;
4624 /* always allocate the top pgd */
4625 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4628 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
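/*
 * Worked example (editorial): for the default 48-bit guest width the
 * adjusted width is 48 = 12 (page offset) + 4 * 9 (level stride), so
 * width_to_agaw() selects a 4-level page table for the domain.
 */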
4632 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4634 struct dmar_domain *dmar_domain;
4635 struct iommu_domain *domain;
4637 if (type != IOMMU_DOMAIN_UNMANAGED)
4640 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4642 pr_err("Can't allocate dmar_domain\n");
4645 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4646 pr_err("Domain initialization failed\n");
4647 domain_exit(dmar_domain);
4650 domain_update_iommu_cap(dmar_domain);
4652 domain = &dmar_domain->domain;
4653 domain->geometry.aperture_start = 0;
4654 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4655 domain->geometry.force_aperture = true;
4660 static void intel_iommu_domain_free(struct iommu_domain *domain)
4662 domain_exit(to_dmar_domain(domain));
4665 static int intel_iommu_attach_device(struct iommu_domain *domain,
4668 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4669 struct intel_iommu *iommu;
4673 if (device_is_rmrr_locked(dev)) {
4674 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4678 /* normally dev is not mapped */
4679 if (unlikely(domain_context_mapped(dev))) {
4680 struct dmar_domain *old_domain;
4682 old_domain = find_domain(dev);
4684 if (domain_type_is_vm_or_si(dmar_domain))
4685 domain_remove_one_dev_info(old_domain, dev);
4687 domain_remove_dev_info(old_domain);
4689 if (!domain_type_is_vm_or_si(old_domain) &&
4690 list_empty(&old_domain->devices))
4691 domain_exit(old_domain);
4695 iommu = device_to_iommu(dev, &bus, &devfn);
4699 /* check if this iommu agaw is sufficient for max mapped address */
4700 addr_width = agaw_to_width(iommu->agaw);
4701 if (addr_width > cap_mgaw(iommu->cap))
4702 addr_width = cap_mgaw(iommu->cap);
4704 if (dmar_domain->max_addr > (1LL << addr_width)) {
4705 pr_err("%s: iommu width (%d) is not "
4706 "sufficient for the mapped address (%llx)\n",
4707 __func__, addr_width, dmar_domain->max_addr);
4710 dmar_domain->gaw = addr_width;
4713 * Knock out extra levels of page tables if necessary
4715 while (iommu->agaw < dmar_domain->agaw) {
4716 struct dma_pte *pte;
4718 pte = dmar_domain->pgd;
4719 if (dma_pte_present(pte)) {
4720 dmar_domain->pgd = (struct dma_pte *)
4721 phys_to_virt(dma_pte_addr(pte));
4722 free_pgtable_page(pte);
4724 dmar_domain->agaw--;
4727 return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
4730 static void intel_iommu_detach_device(struct iommu_domain *domain,
4733 domain_remove_one_dev_info(to_dmar_domain(domain), dev);
4736 static int intel_iommu_map(struct iommu_domain *domain,
4737 unsigned long iova, phys_addr_t hpa,
4738 size_t size, int iommu_prot)
4740 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4745 if (iommu_prot & IOMMU_READ)
4746 prot |= DMA_PTE_READ;
4747 if (iommu_prot & IOMMU_WRITE)
4748 prot |= DMA_PTE_WRITE;
4749 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4750 prot |= DMA_PTE_SNP;
4752 max_addr = iova + size;
4753 if (dmar_domain->max_addr < max_addr) {
4756 /* check if minimum agaw is sufficient for mapped address */
4757 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4758 if (end < max_addr) {
4759 pr_err("%s: iommu width (%d) is not "
4760 "sufficient for the mapped address (%llx)\n",
4761 __func__, dmar_domain->gaw, max_addr);
4764 dmar_domain->max_addr = max_addr;
4766 /* Round up size to next multiple of PAGE_SIZE, if it and
4767 the low bits of hpa would take us onto the next page */
4768 size = aligned_nrpages(hpa, size);
4769 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4770 hpa >> VTD_PAGE_SHIFT, size, prot);
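/*
 * Example of the rounding above (hypothetical values): mapping
 * hpa = 0x10000800 with size = 0x1000 straddles a page boundary, so
 * aligned_nrpages() yields 2 and both VT-d pages are mapped even
 * though only 4KiB was requested.
 */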
4774 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4775 unsigned long iova, size_t size)
4777 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4778 struct page *freelist = NULL;
4779 struct intel_iommu *iommu;
4780 unsigned long start_pfn, last_pfn;
4781 unsigned int npages;
4782 int iommu_id, num, ndomains, level = 0;
4784 /* Cope with horrid API which requires us to unmap more than the
4785 size argument if it happens to be a large-page mapping. */
4786 if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4789 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4790 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4792 start_pfn = iova >> VTD_PAGE_SHIFT;
4793 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4795 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4797 npages = last_pfn - start_pfn + 1;
4799 for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4800 iommu = g_iommus[iommu_id];
4803 * find bit position of dmar_domain
4805 ndomains = cap_ndoms(iommu->cap);
4806 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4807 if (iommu->domains[num] == dmar_domain)
4808 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4809 npages, !freelist, 0);
4814 dma_free_pagelist(freelist);
4816 if (dmar_domain->max_addr == iova + size)
4817 dmar_domain->max_addr = iova;
4822 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4825 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4826 struct dma_pte *pte;
4830 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4832 phys = dma_pte_addr(pte);
4837 static bool intel_iommu_capable(enum iommu_cap cap)
4839 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4840 return domain_update_iommu_snooping(NULL) == 1;
4841 if (cap == IOMMU_CAP_INTR_REMAP)
4842 return irq_remapping_enabled == 1;
4847 static int intel_iommu_add_device(struct device *dev)
4849 struct intel_iommu *iommu;
4850 struct iommu_group *group;
4853 iommu = device_to_iommu(dev, &bus, &devfn);
4857 iommu_device_link(iommu->iommu_dev, dev);
4859 group = iommu_group_get_for_dev(dev);
4862 return PTR_ERR(group);
4864 iommu_group_put(group);
4868 static void intel_iommu_remove_device(struct device *dev)
4870 struct intel_iommu *iommu;
4873 iommu = device_to_iommu(dev, &bus, &devfn);
4877 iommu_group_remove_device(dev);
4879 iommu_device_unlink(iommu->iommu_dev, dev);
4882 static const struct iommu_ops intel_iommu_ops = {
4883 .capable = intel_iommu_capable,
4884 .domain_alloc = intel_iommu_domain_alloc,
4885 .domain_free = intel_iommu_domain_free,
4886 .attach_dev = intel_iommu_attach_device,
4887 .detach_dev = intel_iommu_detach_device,
4888 .map = intel_iommu_map,
4889 .unmap = intel_iommu_unmap,
4890 .map_sg = default_iommu_map_sg,
4891 .iova_to_phys = intel_iommu_iova_to_phys,
4892 .add_device = intel_iommu_add_device,
4893 .remove_device = intel_iommu_remove_device,
4894 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
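/*
 * Usage sketch (hypothetical caller such as VFIO, not part of this
 * file): the ops above are driven through the generic IOMMU API:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, &pdev->dev)) {
 *		iommu_map(dom, iova, phys, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap(dom, iova, SZ_4K);
 *		iommu_detach_device(dom, &pdev->dev);
 *	}
 *	iommu_domain_free(dom);
 */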
4897 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4899 /* G4x/GM45 integrated gfx dmar support is totally busted. */
4900 pr_info("Disabling IOMMU for graphics on this chipset\n");
4904 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4905 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4906 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4907 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4908 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4909 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4910 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4912 static void quirk_iommu_rwbf(struct pci_dev *dev)
4915 * Mobile 4 Series Chipset neglects to set RWBF capability,
4916 * but needs it. Same seems to hold for the desktop versions.
4918 pr_info("Forcing write-buffer flush capability\n");
4922 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4923 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4924 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4925 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4926 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4927 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4928 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4931 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4932 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4933 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4934 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4935 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4936 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4937 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4938 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
4940 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4944 if (pci_read_config_word(dev, GGC, &ggc))
4947 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4948 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4950 } else if (dmar_map_gfx) {
4951 /* we have to ensure the gfx device is idle before we flush */
4952 pr_info("Disabling batched IOTLB flush on Ironlake\n");
4953 intel_iommu_strict = 1;
4956 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4957 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4958 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4959 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4961 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4962 ISOCH DMAR unit for the Azalia sound device, but not give it any
4963 TLB entries, which causes it to deadlock. Check for that. We do
4964 this in a function called from init_dmars(), instead of in a PCI
4965 quirk, because we don't want to print the obnoxious "BIOS broken"
4966 message if VT-d is actually disabled.
4968 static void __init check_tylersburg_isoch(void)
4970 struct pci_dev *pdev;
4971 uint32_t vtisochctrl;
4973 /* If there's no Azalia in the system anyway, forget it. */
4974 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4979 /* System Management Registers. Might be hidden, in which case
4980 we can't do the sanity check. But that's OK, because the
4981 known-broken BIOSes _don't_ actually hide it, so far. */
4982 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4986 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4993 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4994 if (vtisochctrl & 1)
4997 /* Drop all bits other than the number of TLB entries */
4998 vtisochctrl &= 0x1c;
5000 /* If we have the recommended number of TLB entries (16), fine. */
5001 if (vtisochctrl == 0x10)
5004 /* Zero TLB entries? You get to ride the short bus to school. */
5006 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5007 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5008 dmi_get_system_info(DMI_BIOS_VENDOR),
5009 dmi_get_system_info(DMI_BIOS_VERSION),
5010 dmi_get_system_info(DMI_PRODUCT_VERSION));
5011 iommu_identity_mapping |= IDENTMAP_AZALIA;
5015 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",