2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/timer.h>
36 #include <linux/iova.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/sysdev.h>
40 #include <linux/dmi.h>
41 #include <asm/cacheflush.h>
42 #include <asm/iommu.h>
45 #define ROOT_SIZE VTD_PAGE_SIZE
46 #define CONTEXT_SIZE VTD_PAGE_SIZE
48 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
49 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
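/* For illustration: pdev->class holds the 24-bit class code, so a VGA
 * controller (class 0x030000) gives 0x030000 >> 16 == 0x03 ==
 * PCI_BASE_CLASS_DISPLAY, and an ISA bridge (class 0x060100) gives
 * 0x060100 >> 8 == 0x0601 == PCI_CLASS_BRIDGE_ISA. */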
51 #define IOAPIC_RANGE_START (0xfee00000)
52 #define IOAPIC_RANGE_END (0xfeefffff)
53 #define IOVA_START_ADDR (0x1000)
55 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
57 #define MAX_AGAW_WIDTH 64
59 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
60 #define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
62 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
63 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
64 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
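/* Worked example, assuming 4KiB pages (PAGE_SHIFT == VTD_PAGE_SHIFT == 12):
 * DOMAIN_MAX_ADDR(48) == 2^48 - 1, DOMAIN_MAX_PFN(48) == 2^36 - 1, and
 * DMA_32BIT_PFN == IOVA_PFN(0xffffffff) == 0xfffff. */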
67 /* VT-d pages must never be larger than MM pages. Otherwise things
68 are never going to work. */
69 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
71 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
74 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
76 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
78 static inline unsigned long page_to_dma_pfn(struct page *pg)
80 return mm_to_dma_pfn(page_to_pfn(pg));
82 static inline unsigned long virt_to_dma_pfn(void *p)
84 return page_to_dma_pfn(virt_to_page(p));
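/* In the common x86 case PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the
 * dma_to_mm_pfn()/mm_to_dma_pfn() conversions above are identity; with
 * larger MM pages one MM pfn would expand to several VT-d pfns, hence the
 * requirement above that VT-d pages are not bigger than MM pages. */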
87 /* global iommu list, set NULL for ignored DMAR units */
88 static struct intel_iommu **g_iommus;
90 static int rwbf_quirk;
95 * 12-63: Context Ptr (12 - (haw-1))
102 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
103 static inline bool root_present(struct root_entry *root)
105 return (root->val & 1);
107 static inline void set_root_present(struct root_entry *root)
111 static inline void set_root_value(struct root_entry *root, unsigned long value)
113 root->val |= value & VTD_PAGE_MASK;
116 static inline struct context_entry *
117 get_context_addr_from_root(struct root_entry *root)
119 return (struct context_entry *)
120 (root_present(root)?phys_to_virt(
121 root->val & VTD_PAGE_MASK) :
128 * 1: fault processing disable
129 * 2-3: translation type
130 * 12-63: address space root
136 struct context_entry {
141 static inline bool context_present(struct context_entry *context)
143 return (context->lo & 1);
145 static inline void context_set_present(struct context_entry *context)
150 static inline void context_set_fault_enable(struct context_entry *context)
152 context->lo &= (((u64)-1) << 2) | 1;
155 static inline void context_set_translation_type(struct context_entry *context,
158 context->lo &= (((u64)-1) << 4) | 3;
159 context->lo |= (value & 3) << 2;
162 static inline void context_set_address_root(struct context_entry *context,
165 context->lo |= value & VTD_PAGE_MASK;
168 static inline void context_set_address_width(struct context_entry *context,
171 context->hi |= value & 7;
174 static inline void context_set_domain_id(struct context_entry *context,
177 context->hi |= (value & ((1 << 16) - 1)) << 8;
180 static inline void context_clear_entry(struct context_entry *context)
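/* Taken together, the helpers above pack a context entry as: lo bit 0 =
 * present, bit 1 = fault processing disable, bits 2-3 = translation type,
 * bits 12-63 = address space root (page-table pointer); hi bits 0-2 =
 * address width (AGAW), bits 8-23 = domain id. */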
193 * 12-63: Host physical address
199 static inline void dma_clear_pte(struct dma_pte *pte)
204 static inline void dma_set_pte_readable(struct dma_pte *pte)
206 pte->val |= DMA_PTE_READ;
209 static inline void dma_set_pte_writable(struct dma_pte *pte)
211 pte->val |= DMA_PTE_WRITE;
214 static inline void dma_set_pte_snp(struct dma_pte *pte)
216 pte->val |= DMA_PTE_SNP;
219 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
221 pte->val = (pte->val & ~3) | (prot & 3);
224 static inline u64 dma_pte_addr(struct dma_pte *pte)
227 return pte->val & VTD_PAGE_MASK;
229 /* Must have a full atomic 64-bit read */
230 return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
234 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
236 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
239 static inline bool dma_pte_present(struct dma_pte *pte)
241 return (pte->val & 3) != 0;
244 static inline int first_pte_in_page(struct dma_pte *pte)
246 return !((unsigned long)pte & ~VTD_PAGE_MASK);
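/* With 4KiB VT-d pages and 8-byte PTEs a page-table page holds 512 entries,
 * so first_pte_in_page() is true exactly for slot 0 of a page-table page;
 * callers below use it to flush the CPU cache once per page-table page
 * rather than once per PTE. */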
250 * This domain is a static identity mapping domain.
251 * 1. This domain creates a static 1:1 mapping to all usable memory.
252 * 2. It maps to each iommu if successful.
253 * 3. Each iommu maps to this domain if successful.
255 static struct dmar_domain *si_domain;
256 static int hw_pass_through = 1;
258 /* devices under the same p2p bridge are owned in one domain */
259 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
261 /* domain represents a virtual machine; more than one device
262 * across iommus may be owned by one domain, e.g. a kvm guest.
264 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
266 /* si_domain contains multiple devices */
267 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
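/* A dmar_domain is normally backed by a single iommu; domains carrying
 * DOMAIN_FLAG_VIRTUAL_MACHINE or DOMAIN_FLAG_STATIC_IDENTITY may span
 * several iommus, which is why domain_get_iommu() below rejects them. */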
270 int id; /* domain id */
271 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
273 struct list_head devices; /* all devices' list */
274 struct iova_domain iovad; /* iova's that belong to this domain */
276 struct dma_pte *pgd; /* virtual address */
277 int gaw; /* max guest address width */
279 /* adjusted guest address width, 0 is level 2 30-bit */
282 int flags; /* flags to find out type of domain */
284 int iommu_coherency;/* indicate coherency of iommu access */
285 int iommu_snooping; /* indicate snooping control feature*/
286 int iommu_count; /* reference count of iommu */
287 spinlock_t iommu_lock; /* protect iommu set in domain */
288 u64 max_addr; /* maximum mapped address */
291 /* PCI domain-device relationship */
292 struct device_domain_info {
293 struct list_head link; /* link to domain siblings */
294 struct list_head global; /* link to global list */
295 int segment; /* PCI domain */
296 u8 bus; /* PCI bus number */
297 u8 devfn; /* PCI devfn number */
298 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
299 struct intel_iommu *iommu; /* IOMMU used by this device */
300 struct dmar_domain *domain; /* pointer to domain */
303 static void flush_unmaps_timeout(unsigned long data);
305 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
307 #define HIGH_WATER_MARK 250
308 struct deferred_flush_tables {
310 struct iova *iova[HIGH_WATER_MARK];
311 struct dmar_domain *domain[HIGH_WATER_MARK];
314 static struct deferred_flush_tables *deferred_flush;
316 /* bitmap for indexing intel_iommus */
317 static int g_num_of_iommus;
319 static DEFINE_SPINLOCK(async_umap_flush_lock);
320 static LIST_HEAD(unmaps_to_do);
323 static long list_size;
325 static void domain_remove_dev_info(struct dmar_domain *domain);
327 #ifdef CONFIG_DMAR_DEFAULT_ON
328 int dmar_disabled = 0;
330 int dmar_disabled = 1;
331 #endif /*CONFIG_DMAR_DEFAULT_ON*/
333 static int __initdata dmar_map_gfx = 1;
334 static int dmar_forcedac;
335 static int intel_iommu_strict;
337 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
338 static DEFINE_SPINLOCK(device_domain_lock);
339 static LIST_HEAD(device_domain_list);
341 static struct iommu_ops intel_iommu_ops;
343 static int __init intel_iommu_setup(char *str)
348 if (!strncmp(str, "on", 2)) {
350 printk(KERN_INFO "Intel-IOMMU: enabled\n");
351 } else if (!strncmp(str, "off", 3)) {
353 printk(KERN_INFO "Intel-IOMMU: disabled\n");
354 } else if (!strncmp(str, "igfx_off", 8)) {
357 "Intel-IOMMU: disable GFX device mapping\n");
358 } else if (!strncmp(str, "forcedac", 8)) {
360 "Intel-IOMMU: Forcing DAC for PCI devices\n");
362 } else if (!strncmp(str, "strict", 6)) {
364 "Intel-IOMMU: disable batched IOTLB flush\n");
365 intel_iommu_strict = 1;
368 str += strcspn(str, ",");
374 __setup("intel_iommu=", intel_iommu_setup);
376 static struct kmem_cache *iommu_domain_cache;
377 static struct kmem_cache *iommu_devinfo_cache;
378 static struct kmem_cache *iommu_iova_cache;
380 static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
385 /* trying to avoid low memory issues */
386 flags = current->flags & PF_MEMALLOC;
387 current->flags |= PF_MEMALLOC;
388 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
389 current->flags &= (~PF_MEMALLOC | flags);
394 static inline void *alloc_pgtable_page(void)
399 /* trying to avoid low memory issues */
400 flags = current->flags & PF_MEMALLOC;
401 current->flags |= PF_MEMALLOC;
402 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
403 current->flags &= (~PF_MEMALLOC | flags);
407 static inline void free_pgtable_page(void *vaddr)
409 free_page((unsigned long)vaddr);
412 static inline void *alloc_domain_mem(void)
414 return iommu_kmem_cache_alloc(iommu_domain_cache);
417 static void free_domain_mem(void *vaddr)
419 kmem_cache_free(iommu_domain_cache, vaddr);
422 static inline void * alloc_devinfo_mem(void)
424 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
427 static inline void free_devinfo_mem(void *vaddr)
429 kmem_cache_free(iommu_devinfo_cache, vaddr);
432 struct iova *alloc_iova_mem(void)
434 return iommu_kmem_cache_alloc(iommu_iova_cache);
437 void free_iova_mem(struct iova *iova)
439 kmem_cache_free(iommu_iova_cache, iova);
443 static inline int width_to_agaw(int width);
445 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
450 sagaw = cap_sagaw(iommu->cap);
451 for (agaw = width_to_agaw(max_gaw);
453 if (test_bit(agaw, &sagaw))
461 * Calculate max SAGAW for each iommu.
463 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
465 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
469 * calculate agaw for each iommu.
470 * "SAGAW" may be different across iommus, use a default agaw, and
471 * fall back to a smaller supported agaw for iommus that don't support the default agaw.
473 int iommu_calculate_agaw(struct intel_iommu *iommu)
475 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
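/* Example: for the default 48-bit width, width_to_agaw(48) == (48-30)/9 == 2,
 * i.e. a 4-level page table.  If SAGAW bit 2 is not set, __iommu_calculate_agaw()
 * falls back to the next smaller supported agaw, e.g. agaw 1 (a 39-bit,
 * 3-level table). */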
478 /* This function only returns a single iommu in a domain */
479 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
483 /* si_domain and vm domain should not get here. */
484 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
485 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
487 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
488 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
491 return g_iommus[iommu_id];
494 static void domain_update_iommu_coherency(struct dmar_domain *domain)
498 domain->iommu_coherency = 1;
500 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
501 for (; i < g_num_of_iommus; ) {
502 if (!ecap_coherent(g_iommus[i]->ecap)) {
503 domain->iommu_coherency = 0;
506 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
510 static void domain_update_iommu_snooping(struct dmar_domain *domain)
514 domain->iommu_snooping = 1;
516 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
517 for (; i < g_num_of_iommus; ) {
518 if (!ecap_sc_support(g_iommus[i]->ecap)) {
519 domain->iommu_snooping = 0;
522 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
526 /* Some capabilities may be different across iommus */
527 static void domain_update_iommu_cap(struct dmar_domain *domain)
529 domain_update_iommu_coherency(domain);
530 domain_update_iommu_snooping(domain);
533 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
535 struct dmar_drhd_unit *drhd = NULL;
538 for_each_drhd_unit(drhd) {
541 if (segment != drhd->segment)
544 for (i = 0; i < drhd->devices_cnt; i++) {
545 if (drhd->devices[i] &&
546 drhd->devices[i]->bus->number == bus &&
547 drhd->devices[i]->devfn == devfn)
549 if (drhd->devices[i] &&
550 drhd->devices[i]->subordinate &&
551 drhd->devices[i]->subordinate->number <= bus &&
552 drhd->devices[i]->subordinate->subordinate >= bus)
556 if (drhd->include_all)
563 static void domain_flush_cache(struct dmar_domain *domain,
564 void *addr, int size)
566 if (!domain->iommu_coherency)
567 clflush_cache_range(addr, size);
570 /* Gets context entry for a given bus and devfn */
571 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
574 struct root_entry *root;
575 struct context_entry *context;
576 unsigned long phy_addr;
579 spin_lock_irqsave(&iommu->lock, flags);
580 root = &iommu->root_entry[bus];
581 context = get_context_addr_from_root(root);
583 context = (struct context_entry *)alloc_pgtable_page();
585 spin_unlock_irqrestore(&iommu->lock, flags);
588 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
589 phy_addr = virt_to_phys((void *)context);
590 set_root_value(root, phy_addr);
591 set_root_present(root);
592 __iommu_flush_cache(iommu, root, sizeof(*root));
594 spin_unlock_irqrestore(&iommu->lock, flags);
595 return &context[devfn];
598 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
600 struct root_entry *root;
601 struct context_entry *context;
605 spin_lock_irqsave(&iommu->lock, flags);
606 root = &iommu->root_entry[bus];
607 context = get_context_addr_from_root(root);
612 ret = context_present(&context[devfn]);
614 spin_unlock_irqrestore(&iommu->lock, flags);
618 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
620 struct root_entry *root;
621 struct context_entry *context;
624 spin_lock_irqsave(&iommu->lock, flags);
625 root = &iommu->root_entry[bus];
626 context = get_context_addr_from_root(root);
628 context_clear_entry(&context[devfn]);
629 __iommu_flush_cache(iommu, &context[devfn], \
632 spin_unlock_irqrestore(&iommu->lock, flags);
635 static void free_context_table(struct intel_iommu *iommu)
637 struct root_entry *root;
640 struct context_entry *context;
642 spin_lock_irqsave(&iommu->lock, flags);
643 if (!iommu->root_entry) {
646 for (i = 0; i < ROOT_ENTRY_NR; i++) {
647 root = &iommu->root_entry[i];
648 context = get_context_addr_from_root(root);
650 free_pgtable_page(context);
652 free_pgtable_page(iommu->root_entry);
653 iommu->root_entry = NULL;
655 spin_unlock_irqrestore(&iommu->lock, flags);
658 /* page table handling */
659 #define LEVEL_STRIDE (9)
660 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
662 static inline int agaw_to_level(int agaw)
667 static inline int agaw_to_width(int agaw)
669 return 30 + agaw * LEVEL_STRIDE;
673 static inline int width_to_agaw(int width)
675 return (width - 30) / LEVEL_STRIDE;
678 static inline unsigned int level_to_offset_bits(int level)
680 return (level - 1) * LEVEL_STRIDE;
683 static inline int pfn_level_offset(unsigned long pfn, int level)
685 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
688 static inline unsigned long level_mask(int level)
690 return -1UL << level_to_offset_bits(level);
693 static inline unsigned long level_size(int level)
695 return 1UL << level_to_offset_bits(level);
698 static inline unsigned long align_to_level(unsigned long pfn, int level)
700 return (pfn + level_size(level) - 1) & level_mask(level);
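/* Illustration of the level helpers, assuming 4KiB VT-d pages: level 1
 * covers offset bits 0-8 (one 4KiB page per PTE), level 2 covers bits 9-17
 * (level_size(2) == 512 pfns, i.e. 2MiB per entry), and pfn_level_offset()
 * extracts the 9-bit table index used at each level of the walk. */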
703 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
706 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
707 struct dma_pte *parent, *pte = NULL;
708 int level = agaw_to_level(domain->agaw);
711 BUG_ON(!domain->pgd);
712 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
713 parent = domain->pgd;
718 offset = pfn_level_offset(pfn, level);
719 pte = &parent[offset];
723 if (!dma_pte_present(pte)) {
726 tmp_page = alloc_pgtable_page();
731 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
732 pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
733 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
734 /* Someone else set it while we were thinking; use theirs. */
735 free_pgtable_page(tmp_page);
738 domain_flush_cache(domain, pte, sizeof(*pte));
741 parent = phys_to_virt(dma_pte_addr(pte));
748 /* return address's pte at specific level */
749 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
753 struct dma_pte *parent, *pte = NULL;
754 int total = agaw_to_level(domain->agaw);
757 parent = domain->pgd;
758 while (level <= total) {
759 offset = pfn_level_offset(pfn, total);
760 pte = &parent[offset];
764 if (!dma_pte_present(pte))
766 parent = phys_to_virt(dma_pte_addr(pte));
772 /* clear last level pte; a tlb flush should follow */
773 static void dma_pte_clear_range(struct dmar_domain *domain,
774 unsigned long start_pfn,
775 unsigned long last_pfn)
777 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
778 struct dma_pte *first_pte, *pte;
780 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
781 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
783 /* we don't need lock here; nobody else touches the iova range */
784 while (start_pfn <= last_pfn) {
785 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
787 start_pfn = align_to_level(start_pfn + 1, 2);
794 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
796 domain_flush_cache(domain, first_pte,
797 (void *)pte - (void *)first_pte);
801 /* free page table pages. last level pte should already be cleared */
802 static void dma_pte_free_pagetable(struct dmar_domain *domain,
803 unsigned long start_pfn,
804 unsigned long last_pfn)
806 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
807 struct dma_pte *first_pte, *pte;
808 int total = agaw_to_level(domain->agaw);
812 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
813 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
815 /* We don't need lock here; nobody else touches the iova range */
817 while (level <= total) {
818 tmp = align_to_level(start_pfn, level);
820 /* If we can't even clear one PTE at this level, we're done */
821 if (tmp + level_size(level) - 1 > last_pfn)
824 while (tmp + level_size(level) - 1 <= last_pfn) {
825 first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
827 tmp = align_to_level(tmp + 1, level + 1);
831 if (dma_pte_present(pte)) {
832 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
836 tmp += level_size(level);
837 } while (!first_pte_in_page(pte) &&
838 tmp + level_size(level) - 1 <= last_pfn);
840 domain_flush_cache(domain, first_pte,
841 (void *)pte - (void *)first_pte);
847 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
848 free_pgtable_page(domain->pgd);
854 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
856 struct root_entry *root;
859 root = (struct root_entry *)alloc_pgtable_page();
863 __iommu_flush_cache(iommu, root, ROOT_SIZE);
865 spin_lock_irqsave(&iommu->lock, flags);
866 iommu->root_entry = root;
867 spin_unlock_irqrestore(&iommu->lock, flags);
872 static void iommu_set_root_entry(struct intel_iommu *iommu)
878 addr = iommu->root_entry;
880 spin_lock_irqsave(&iommu->register_lock, flag);
881 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
883 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
885 /* Make sure hardware completes it */
886 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
887 readl, (sts & DMA_GSTS_RTPS), sts);
889 spin_unlock_irqrestore(&iommu->register_lock, flag);
892 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
897 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
900 spin_lock_irqsave(&iommu->register_lock, flag);
901 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
903 /* Make sure hardware completes it */
904 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
905 readl, (!(val & DMA_GSTS_WBFS)), val);
907 spin_unlock_irqrestore(&iommu->register_lock, flag);
910 /* return value determines if we need a write buffer flush */
911 static void __iommu_flush_context(struct intel_iommu *iommu,
912 u16 did, u16 source_id, u8 function_mask,
919 case DMA_CCMD_GLOBAL_INVL:
920 val = DMA_CCMD_GLOBAL_INVL;
922 case DMA_CCMD_DOMAIN_INVL:
923 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
925 case DMA_CCMD_DEVICE_INVL:
926 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
927 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
934 spin_lock_irqsave(&iommu->register_lock, flag);
935 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
937 /* Make sure hardware completes it */
938 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
939 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
941 spin_unlock_irqrestore(&iommu->register_lock, flag);
944 /* return value determines if we need a write buffer flush */
945 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
946 u64 addr, unsigned int size_order, u64 type)
948 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
949 u64 val = 0, val_iva = 0;
953 case DMA_TLB_GLOBAL_FLUSH:
954 /* a global flush doesn't need to set IVA_REG */
955 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
957 case DMA_TLB_DSI_FLUSH:
958 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
960 case DMA_TLB_PSI_FLUSH:
961 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
962 /* Note: always flush non-leaf currently */
963 val_iva = size_order | addr;
968 /* Note: set drain read/write */
971 * This is probably just to be extra safe; it looks like we can
972 * ignore it without any impact.
974 if (cap_read_drain(iommu->cap))
975 val |= DMA_TLB_READ_DRAIN;
977 if (cap_write_drain(iommu->cap))
978 val |= DMA_TLB_WRITE_DRAIN;
980 spin_lock_irqsave(&iommu->register_lock, flag);
981 /* Note: Only uses first TLB reg currently */
983 dmar_writeq(iommu->reg + tlb_offset, val_iva);
984 dmar_writeq(iommu->reg + tlb_offset + 8, val);
986 /* Make sure hardware completes it */
987 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
988 dmar_readq, (!(val & DMA_TLB_IVT)), val);
990 spin_unlock_irqrestore(&iommu->register_lock, flag);
992 /* check IOTLB invalidation granularity */
993 if (DMA_TLB_IAIG(val) == 0)
994 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
995 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
996 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
997 (unsigned long long)DMA_TLB_IIRG(type),
998 (unsigned long long)DMA_TLB_IAIG(val));
1001 static struct device_domain_info *iommu_support_dev_iotlb(
1002 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1005 unsigned long flags;
1006 struct device_domain_info *info;
1007 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1009 if (!ecap_dev_iotlb_support(iommu->ecap))
1015 spin_lock_irqsave(&device_domain_lock, flags);
1016 list_for_each_entry(info, &domain->devices, link)
1017 if (info->bus == bus && info->devfn == devfn) {
1021 spin_unlock_irqrestore(&device_domain_lock, flags);
1023 if (!found || !info->dev)
1026 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1029 if (!dmar_find_matched_atsr_unit(info->dev))
1032 info->iommu = iommu;
1037 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1042 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1045 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1047 if (!info->dev || !pci_ats_enabled(info->dev))
1050 pci_disable_ats(info->dev);
1053 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1054 u64 addr, unsigned mask)
1057 unsigned long flags;
1058 struct device_domain_info *info;
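/* Walk the domain's device list and, for each device with ATS enabled,
 * send a device-IOTLB invalidation: sid is the 16-bit PCI requester id
 * (bus << 8 | devfn) and qdep the device's invalidation queue depth. */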
1060 spin_lock_irqsave(&device_domain_lock, flags);
1061 list_for_each_entry(info, &domain->devices, link) {
1062 if (!info->dev || !pci_ats_enabled(info->dev))
1065 sid = info->bus << 8 | info->devfn;
1066 qdep = pci_ats_queue_depth(info->dev);
1067 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1069 spin_unlock_irqrestore(&device_domain_lock, flags);
1072 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1073 unsigned long pfn, unsigned int pages)
1075 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1076 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1081 * Fallback to domain selective flush if no PSI support or the size is
1082 * too big.
1083 * PSI requires the region size to be 2 ^ x pages, and the base address
1084 * to be naturally aligned to the size
1086 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1087 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1090 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1094 * In caching mode, domain ID 0 is reserved for non-present to present
1095 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
1097 if (!cap_caching_mode(iommu->cap) || did)
1098 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
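/* Worked example for iommu_flush_iotlb_psi(): pages == 3 gives
 * mask == ilog2(roundup_pow_of_two(3)) == 2, i.e. a naturally aligned
 * 4-page invalidation; if mask exceeds cap_max_amask_val() the code falls
 * back to a domain-selective (DSI) flush instead. */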
1101 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1104 unsigned long flags;
1106 spin_lock_irqsave(&iommu->register_lock, flags);
1107 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1108 pmen &= ~DMA_PMEN_EPM;
1109 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1111 /* wait for the protected region status bit to clear */
1112 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1113 readl, !(pmen & DMA_PMEN_PRS), pmen);
1115 spin_unlock_irqrestore(&iommu->register_lock, flags);
1118 static int iommu_enable_translation(struct intel_iommu *iommu)
1121 unsigned long flags;
1123 spin_lock_irqsave(&iommu->register_lock, flags);
1124 iommu->gcmd |= DMA_GCMD_TE;
1125 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1127 /* Make sure hardware completes it */
1128 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1129 readl, (sts & DMA_GSTS_TES), sts);
1131 spin_unlock_irqrestore(&iommu->register_lock, flags);
1135 static int iommu_disable_translation(struct intel_iommu *iommu)
1140 spin_lock_irqsave(&iommu->register_lock, flag);
1141 iommu->gcmd &= ~DMA_GCMD_TE;
1142 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1144 /* Make sure hardware completes it */
1145 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1146 readl, (!(sts & DMA_GSTS_TES)), sts);
1148 spin_unlock_irqrestore(&iommu->register_lock, flag);
1153 static int iommu_init_domains(struct intel_iommu *iommu)
1155 unsigned long ndomains;
1156 unsigned long nlongs;
1158 ndomains = cap_ndoms(iommu->cap);
1159 pr_debug("Number of Domains supportd <%ld>\n", ndomains);
1160 nlongs = BITS_TO_LONGS(ndomains);
1162 spin_lock_init(&iommu->lock);
1164 /* TBD: there might be 64K domains,
1165 * consider other allocation for future chip
1167 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1168 if (!iommu->domain_ids) {
1169 printk(KERN_ERR "Allocating domain id array failed\n");
1172 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1174 if (!iommu->domains) {
1175 printk(KERN_ERR "Allocating domain array failed\n");
1180 * if Caching mode is set, then invalid translations are tagged
1181 * with domainid 0. Hence we need to pre-allocate it.
1183 if (cap_caching_mode(iommu->cap))
1184 set_bit(0, iommu->domain_ids);
1189 static void domain_exit(struct dmar_domain *domain);
1190 static void vm_domain_exit(struct dmar_domain *domain);
1192 void free_dmar_iommu(struct intel_iommu *iommu)
1194 struct dmar_domain *domain;
1196 unsigned long flags;
1198 if ((iommu->domains) && (iommu->domain_ids)) {
1199 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1200 for (; i < cap_ndoms(iommu->cap); ) {
1201 domain = iommu->domains[i];
1202 clear_bit(i, iommu->domain_ids);
1204 spin_lock_irqsave(&domain->iommu_lock, flags);
1205 if (--domain->iommu_count == 0) {
1206 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1207 vm_domain_exit(domain);
1209 domain_exit(domain);
1211 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1213 i = find_next_bit(iommu->domain_ids,
1214 cap_ndoms(iommu->cap), i+1);
1218 if (iommu->gcmd & DMA_GCMD_TE)
1219 iommu_disable_translation(iommu);
1222 set_irq_data(iommu->irq, NULL);
1223 /* This will mask the irq */
1224 free_irq(iommu->irq, iommu);
1225 destroy_irq(iommu->irq);
1228 kfree(iommu->domains);
1229 kfree(iommu->domain_ids);
1231 g_iommus[iommu->seq_id] = NULL;
1233 /* if all iommus are freed, free g_iommus */
1234 for (i = 0; i < g_num_of_iommus; i++) {
1239 if (i == g_num_of_iommus)
1242 /* free context mapping */
1243 free_context_table(iommu);
1246 static struct dmar_domain *alloc_domain(void)
1248 struct dmar_domain *domain;
1250 domain = alloc_domain_mem();
1254 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1260 static int iommu_attach_domain(struct dmar_domain *domain,
1261 struct intel_iommu *iommu)
1264 unsigned long ndomains;
1265 unsigned long flags;
1267 ndomains = cap_ndoms(iommu->cap);
1269 spin_lock_irqsave(&iommu->lock, flags);
1271 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1272 if (num >= ndomains) {
1273 spin_unlock_irqrestore(&iommu->lock, flags);
1274 printk(KERN_ERR "IOMMU: no free domain ids\n");
1279 set_bit(num, iommu->domain_ids);
1280 set_bit(iommu->seq_id, &domain->iommu_bmp);
1281 iommu->domains[num] = domain;
1282 spin_unlock_irqrestore(&iommu->lock, flags);
1287 static void iommu_detach_domain(struct dmar_domain *domain,
1288 struct intel_iommu *iommu)
1290 unsigned long flags;
1294 spin_lock_irqsave(&iommu->lock, flags);
1295 ndomains = cap_ndoms(iommu->cap);
1296 num = find_first_bit(iommu->domain_ids, ndomains);
1297 for (; num < ndomains; ) {
1298 if (iommu->domains[num] == domain) {
1302 num = find_next_bit(iommu->domain_ids,
1303 cap_ndoms(iommu->cap), num+1);
1307 clear_bit(num, iommu->domain_ids);
1308 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1309 iommu->domains[num] = NULL;
1311 spin_unlock_irqrestore(&iommu->lock, flags);
1314 static struct iova_domain reserved_iova_list;
1315 static struct lock_class_key reserved_rbtree_key;
1317 static void dmar_init_reserved_ranges(void)
1319 struct pci_dev *pdev = NULL;
1323 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1325 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1326 &reserved_rbtree_key);
1328 /* IOAPIC ranges shouldn't be accessed by DMA */
1329 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1330 IOVA_PFN(IOAPIC_RANGE_END));
1332 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1334 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1335 for_each_pci_dev(pdev) {
1338 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1339 r = &pdev->resource[i];
1340 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1342 iova = reserve_iova(&reserved_iova_list,
1346 printk(KERN_ERR "Reserve iova failed\n");
1352 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1354 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1357 static inline int guestwidth_to_adjustwidth(int gaw)
1360 int r = (gaw - 12) % 9;
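/* guestwidth_to_adjustwidth() rounds the guest width up to 12 plus a
 * multiple of the 9-bit level stride: e.g. gaw 48 stays 48
 * ((48 - 12) % 9 == 0), while gaw 40 would become 48. */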
1371 static int domain_init(struct dmar_domain *domain, int guest_width)
1373 struct intel_iommu *iommu;
1374 int adjust_width, agaw;
1375 unsigned long sagaw;
1377 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1378 spin_lock_init(&domain->iommu_lock);
1380 domain_reserve_special_ranges(domain);
1382 /* calculate AGAW */
1383 iommu = domain_get_iommu(domain);
1384 if (guest_width > cap_mgaw(iommu->cap))
1385 guest_width = cap_mgaw(iommu->cap);
1386 domain->gaw = guest_width;
1387 adjust_width = guestwidth_to_adjustwidth(guest_width);
1388 agaw = width_to_agaw(adjust_width);
1389 sagaw = cap_sagaw(iommu->cap);
1390 if (!test_bit(agaw, &sagaw)) {
1391 /* hardware doesn't support it, choose a bigger one */
1392 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1393 agaw = find_next_bit(&sagaw, 5, agaw);
1397 domain->agaw = agaw;
1398 INIT_LIST_HEAD(&domain->devices);
1400 if (ecap_coherent(iommu->ecap))
1401 domain->iommu_coherency = 1;
1403 domain->iommu_coherency = 0;
1405 if (ecap_sc_support(iommu->ecap))
1406 domain->iommu_snooping = 1;
1408 domain->iommu_snooping = 0;
1410 domain->iommu_count = 1;
1412 /* always allocate the top pgd */
1413 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1416 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1420 static void domain_exit(struct dmar_domain *domain)
1422 struct dmar_drhd_unit *drhd;
1423 struct intel_iommu *iommu;
1425 /* Domain 0 is reserved, so don't process it */
1429 domain_remove_dev_info(domain);
1431 put_iova_domain(&domain->iovad);
1434 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1436 /* free page tables */
1437 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1439 for_each_active_iommu(iommu, drhd)
1440 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1441 iommu_detach_domain(domain, iommu);
1443 free_domain_mem(domain);
1446 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1447 u8 bus, u8 devfn, int translation)
1449 struct context_entry *context;
1450 unsigned long flags;
1451 struct intel_iommu *iommu;
1452 struct dma_pte *pgd;
1454 unsigned long ndomains;
1457 struct device_domain_info *info = NULL;
1459 pr_debug("Set context mapping for %02x:%02x.%d\n",
1460 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1462 BUG_ON(!domain->pgd);
1463 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1464 translation != CONTEXT_TT_MULTI_LEVEL);
1466 iommu = device_to_iommu(segment, bus, devfn);
1470 context = device_to_context_entry(iommu, bus, devfn);
1473 spin_lock_irqsave(&iommu->lock, flags);
1474 if (context_present(context)) {
1475 spin_unlock_irqrestore(&iommu->lock, flags);
1482 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1483 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1486 /* find an available domain id for this device in iommu */
1487 ndomains = cap_ndoms(iommu->cap);
1488 num = find_first_bit(iommu->domain_ids, ndomains);
1489 for (; num < ndomains; ) {
1490 if (iommu->domains[num] == domain) {
1495 num = find_next_bit(iommu->domain_ids,
1496 cap_ndoms(iommu->cap), num+1);
1500 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1501 if (num >= ndomains) {
1502 spin_unlock_irqrestore(&iommu->lock, flags);
1503 printk(KERN_ERR "IOMMU: no free domain ids\n");
1507 set_bit(num, iommu->domain_ids);
1508 iommu->domains[num] = domain;
1512 /* Skip top levels of page tables for
1513 * an iommu which has a smaller agaw than the default.
1515 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1516 pgd = phys_to_virt(dma_pte_addr(pgd));
1517 if (!dma_pte_present(pgd)) {
1518 spin_unlock_irqrestore(&iommu->lock, flags);
1524 context_set_domain_id(context, id);
1526 if (translation != CONTEXT_TT_PASS_THROUGH) {
1527 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1528 translation = info ? CONTEXT_TT_DEV_IOTLB :
1529 CONTEXT_TT_MULTI_LEVEL;
1532 * In pass through mode, AW must be programmed to indicate the largest
1533 * AGAW value supported by hardware. And ASR is ignored by hardware.
1535 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1536 context_set_address_width(context, iommu->msagaw);
1538 context_set_address_root(context, virt_to_phys(pgd));
1539 context_set_address_width(context, iommu->agaw);
1542 context_set_translation_type(context, translation);
1543 context_set_fault_enable(context);
1544 context_set_present(context);
1545 domain_flush_cache(domain, context, sizeof(*context));
1548 * It's a non-present to present mapping. If hardware doesn't cache
1549 * non-present entries we only need to flush the write-buffer. If it
1550 * _does_ cache non-present entries, then it does so in the special
1551 * domain #0, which we have to flush:
1553 if (cap_caching_mode(iommu->cap)) {
1554 iommu->flush.flush_context(iommu, 0,
1555 (((u16)bus) << 8) | devfn,
1556 DMA_CCMD_MASK_NOBIT,
1557 DMA_CCMD_DEVICE_INVL);
1558 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
1560 iommu_flush_write_buffer(iommu);
1562 iommu_enable_dev_iotlb(info);
1563 spin_unlock_irqrestore(&iommu->lock, flags);
1565 spin_lock_irqsave(&domain->iommu_lock, flags);
1566 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1567 domain->iommu_count++;
1568 domain_update_iommu_cap(domain);
1570 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1575 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1579 struct pci_dev *tmp, *parent;
1581 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1582 pdev->bus->number, pdev->devfn,
1587 /* dependent device mapping */
1588 tmp = pci_find_upstream_pcie_bridge(pdev);
1591 /* Secondary interface's bus number and devfn 0 */
1592 parent = pdev->bus->self;
1593 while (parent != tmp) {
1594 ret = domain_context_mapping_one(domain,
1595 pci_domain_nr(parent->bus),
1596 parent->bus->number,
1597 parent->devfn, translation);
1600 parent = parent->bus->self;
1602 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1603 return domain_context_mapping_one(domain,
1604 pci_domain_nr(tmp->subordinate),
1605 tmp->subordinate->number, 0,
1607 else /* this is a legacy PCI bridge */
1608 return domain_context_mapping_one(domain,
1609 pci_domain_nr(tmp->bus),
1615 static int domain_context_mapped(struct pci_dev *pdev)
1618 struct pci_dev *tmp, *parent;
1619 struct intel_iommu *iommu;
1621 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1626 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1629 /* dependent device mapping */
1630 tmp = pci_find_upstream_pcie_bridge(pdev);
1633 /* Secondary interface's bus number and devfn 0 */
1634 parent = pdev->bus->self;
1635 while (parent != tmp) {
1636 ret = device_context_mapped(iommu, parent->bus->number,
1640 parent = parent->bus->self;
1643 return device_context_mapped(iommu, tmp->subordinate->number,
1646 return device_context_mapped(iommu, tmp->bus->number,
1650 /* Returns a number of VTD pages, but aligned to MM page size */
1651 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1654 host_addr &= ~PAGE_MASK;
1655 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
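/* Example, assuming 4KiB pages: a buffer starting 0x800 bytes into a page
 * with size 0x1000 spans two pages, and aligned_nrpages() returns
 * PAGE_ALIGN(0x800 + 0x1000) >> VTD_PAGE_SHIFT == 2. */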
1658 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1659 struct scatterlist *sg, unsigned long phys_pfn,
1660 unsigned long nr_pages, int prot)
1662 struct dma_pte *first_pte = NULL, *pte = NULL;
1663 phys_addr_t uninitialized_var(pteval);
1664 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1665 unsigned long sg_res;
1667 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1669 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1672 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1677 sg_res = nr_pages + 1;
1678 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1681 while (nr_pages--) {
1685 sg_res = aligned_nrpages(sg->offset, sg->length);
1686 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1687 sg->dma_length = sg->length;
1688 pteval = page_to_phys(sg_page(sg)) | prot;
1691 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1695 /* We don't need lock here, nobody else
1696 * touches the iova range
1698 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1700 static int dumps = 5;
1701 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1702 iov_pfn, tmp, (unsigned long long)pteval);
1705 debug_dma_dump_mappings(NULL);
1710 if (!nr_pages || first_pte_in_page(pte)) {
1711 domain_flush_cache(domain, first_pte,
1712 (void *)pte - (void *)first_pte);
1716 pteval += VTD_PAGE_SIZE;
1724 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1725 struct scatterlist *sg, unsigned long nr_pages,
1728 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1731 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1732 unsigned long phys_pfn, unsigned long nr_pages,
1735 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1738 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1743 clear_context_table(iommu, bus, devfn);
1744 iommu->flush.flush_context(iommu, 0, 0, 0,
1745 DMA_CCMD_GLOBAL_INVL);
1746 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1749 static void domain_remove_dev_info(struct dmar_domain *domain)
1751 struct device_domain_info *info;
1752 unsigned long flags;
1753 struct intel_iommu *iommu;
1755 spin_lock_irqsave(&device_domain_lock, flags);
1756 while (!list_empty(&domain->devices)) {
1757 info = list_entry(domain->devices.next,
1758 struct device_domain_info, link);
1759 list_del(&info->link);
1760 list_del(&info->global);
1762 info->dev->dev.archdata.iommu = NULL;
1763 spin_unlock_irqrestore(&device_domain_lock, flags);
1765 iommu_disable_dev_iotlb(info);
1766 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1767 iommu_detach_dev(iommu, info->bus, info->devfn);
1768 free_devinfo_mem(info);
1770 spin_lock_irqsave(&device_domain_lock, flags);
1772 spin_unlock_irqrestore(&device_domain_lock, flags);
1777 * Note: we use struct pci_dev->dev.archdata.iommu to store the domain info
1779 static struct dmar_domain *
1780 find_domain(struct pci_dev *pdev)
1782 struct device_domain_info *info;
1784 /* No lock here, assumes no domain exit in normal case */
1785 info = pdev->dev.archdata.iommu;
1787 return info->domain;
1791 /* domain is initialized */
1792 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1794 struct dmar_domain *domain, *found = NULL;
1795 struct intel_iommu *iommu;
1796 struct dmar_drhd_unit *drhd;
1797 struct device_domain_info *info, *tmp;
1798 struct pci_dev *dev_tmp;
1799 unsigned long flags;
1800 int bus = 0, devfn = 0;
1804 domain = find_domain(pdev);
1808 segment = pci_domain_nr(pdev->bus);
1810 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1812 if (dev_tmp->is_pcie) {
1813 bus = dev_tmp->subordinate->number;
1816 bus = dev_tmp->bus->number;
1817 devfn = dev_tmp->devfn;
1819 spin_lock_irqsave(&device_domain_lock, flags);
1820 list_for_each_entry(info, &device_domain_list, global) {
1821 if (info->segment == segment &&
1822 info->bus == bus && info->devfn == devfn) {
1823 found = info->domain;
1827 spin_unlock_irqrestore(&device_domain_lock, flags);
1828 /* pcie-pci bridge already has a domain, use it */
1835 domain = alloc_domain();
1839 /* Allocate new domain for the device */
1840 drhd = dmar_find_matched_drhd_unit(pdev);
1842 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1846 iommu = drhd->iommu;
1848 ret = iommu_attach_domain(domain, iommu);
1850 domain_exit(domain);
1854 if (domain_init(domain, gaw)) {
1855 domain_exit(domain);
1859 /* register pcie-to-pci device */
1861 info = alloc_devinfo_mem();
1863 domain_exit(domain);
1866 info->segment = segment;
1868 info->devfn = devfn;
1870 info->domain = domain;
1871 /* This domain is shared by devices under p2p bridge */
1872 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1874 /* pcie-to-pci bridge already has a domain, use it */
1876 spin_lock_irqsave(&device_domain_lock, flags);
1877 list_for_each_entry(tmp, &device_domain_list, global) {
1878 if (tmp->segment == segment &&
1879 tmp->bus == bus && tmp->devfn == devfn) {
1880 found = tmp->domain;
1885 free_devinfo_mem(info);
1886 domain_exit(domain);
1889 list_add(&info->link, &domain->devices);
1890 list_add(&info->global, &device_domain_list);
1892 spin_unlock_irqrestore(&device_domain_lock, flags);
1896 info = alloc_devinfo_mem();
1899 info->segment = segment;
1900 info->bus = pdev->bus->number;
1901 info->devfn = pdev->devfn;
1903 info->domain = domain;
1904 spin_lock_irqsave(&device_domain_lock, flags);
1905 /* somebody is fast */
1906 found = find_domain(pdev);
1907 if (found != NULL) {
1908 spin_unlock_irqrestore(&device_domain_lock, flags);
1909 if (found != domain) {
1910 domain_exit(domain);
1913 free_devinfo_mem(info);
1916 list_add(&info->link, &domain->devices);
1917 list_add(&info->global, &device_domain_list);
1918 pdev->dev.archdata.iommu = info;
1919 spin_unlock_irqrestore(&device_domain_lock, flags);
1922 /* recheck it here, maybe others set it */
1923 return find_domain(pdev);
1926 static int iommu_identity_mapping;
1928 static int iommu_domain_identity_map(struct dmar_domain *domain,
1929 unsigned long long start,
1930 unsigned long long end)
1932 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1933 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1935 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1936 dma_to_mm_pfn(last_vpfn))) {
1937 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1941 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1942 start, end, domain->id);
1944 * RMRR range might overlap with the physical memory range,
1945 * clear it first
1947 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
1949 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1950 last_vpfn - first_vpfn + 1,
1951 DMA_PTE_READ|DMA_PTE_WRITE);
1954 static int iommu_prepare_identity_map(struct pci_dev *pdev,
1955 unsigned long long start,
1956 unsigned long long end)
1958 struct dmar_domain *domain;
1961 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1965 /* For _hardware_ passthrough, don't bother. But for software
1966 passthrough, we do it anyway -- it may indicate a memory
1967 range which is reserved in E820, and so didn't get set
1968 up to start with in si_domain */
1969 if (domain == si_domain && hw_pass_through) {
1970 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
1971 pci_name(pdev), start, end);
1976 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1977 pci_name(pdev), start, end);
1979 if (end >> agaw_to_width(domain->agaw)) {
1980 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
1981 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1982 agaw_to_width(domain->agaw),
1983 dmi_get_system_info(DMI_BIOS_VENDOR),
1984 dmi_get_system_info(DMI_BIOS_VERSION),
1985 dmi_get_system_info(DMI_PRODUCT_VERSION));
1990 ret = iommu_domain_identity_map(domain, start, end);
1994 /* context entry init */
1995 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2002 domain_exit(domain);
2006 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2007 struct pci_dev *pdev)
2009 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2011 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2012 rmrr->end_address + 1);
2015 #ifdef CONFIG_DMAR_FLOPPY_WA
2016 static inline void iommu_prepare_isa(void)
2018 struct pci_dev *pdev;
2021 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2025 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2026 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
2029 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2030 "floppy might not work\n");
2034 static inline void iommu_prepare_isa(void)
2038 #endif /* !CONFIG_DMAR_FLOPPY_WA */
2040 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2042 static int __init si_domain_work_fn(unsigned long start_pfn,
2043 unsigned long end_pfn, void *datax)
2047 *ret = iommu_domain_identity_map(si_domain,
2048 (uint64_t)start_pfn << PAGE_SHIFT,
2049 (uint64_t)end_pfn << PAGE_SHIFT);
2054 static int __init si_domain_init(int hw)
2056 struct dmar_drhd_unit *drhd;
2057 struct intel_iommu *iommu;
2060 si_domain = alloc_domain();
2064 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2066 for_each_active_iommu(iommu, drhd) {
2067 ret = iommu_attach_domain(si_domain, iommu);
2069 domain_exit(si_domain);
2074 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2075 domain_exit(si_domain);
2079 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2084 for_each_online_node(nid) {
2085 work_with_active_regions(nid, si_domain_work_fn, &ret);
2093 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2094 struct pci_dev *pdev);
2095 static int identity_mapping(struct pci_dev *pdev)
2097 struct device_domain_info *info;
2099 if (likely(!iommu_identity_mapping))
2103 list_for_each_entry(info, &si_domain->devices, link)
2104 if (info->dev == pdev)
2109 static int domain_add_dev_info(struct dmar_domain *domain,
2110 struct pci_dev *pdev,
2113 struct device_domain_info *info;
2114 unsigned long flags;
2117 info = alloc_devinfo_mem();
2121 ret = domain_context_mapping(domain, pdev, translation);
2123 free_devinfo_mem(info);
2127 info->segment = pci_domain_nr(pdev->bus);
2128 info->bus = pdev->bus->number;
2129 info->devfn = pdev->devfn;
2131 info->domain = domain;
2133 spin_lock_irqsave(&device_domain_lock, flags);
2134 list_add(&info->link, &domain->devices);
2135 list_add(&info->global, &device_domain_list);
2136 pdev->dev.archdata.iommu = info;
2137 spin_unlock_irqrestore(&device_domain_lock, flags);
2142 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2144 if (iommu_identity_mapping == 2)
2145 return IS_GFX_DEVICE(pdev);
2148 * We want to start off with all devices in the 1:1 domain, and
2149 * take them out later if we find they can't access all of memory.
2151 * However, we can't do this for PCI devices behind bridges,
2152 * because all PCI devices behind the same bridge will end up
2153 * with the same source-id on their transactions.
2155 * Practically speaking, we can't change things around for these
2156 * devices at run-time, because we can't be sure there'll be no
2157 * DMA transactions in flight for any of their siblings.
2159 * So PCI devices (unless they're on the root bus) as well as
2160 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2161 * the 1:1 domain, just in _case_ one of their siblings turns out
2162 * not to be able to map all of memory.
2164 if (!pdev->is_pcie) {
2165 if (!pci_is_root_bus(pdev->bus))
2167 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2169 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2173 * At boot time, we don't yet know if devices will be 64-bit capable.
2174 * Assume that they will -- if they turn out not to be, then we can
2175 * take them out of the 1:1 domain later.
2178 return pdev->dma_mask > DMA_BIT_MASK(32);
2183 static int __init iommu_prepare_static_identity_mapping(int hw)
2185 struct pci_dev *pdev = NULL;
2188 ret = si_domain_init(hw);
2192 for_each_pci_dev(pdev) {
2193 if (iommu_should_identity_map(pdev, 1)) {
2194 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2195 hw ? "hardware" : "software", pci_name(pdev));
2197 ret = domain_add_dev_info(si_domain, pdev,
2198 hw ? CONTEXT_TT_PASS_THROUGH :
2199 CONTEXT_TT_MULTI_LEVEL);
2208 int __init init_dmars(void)
2210 struct dmar_drhd_unit *drhd;
2211 struct dmar_rmrr_unit *rmrr;
2212 struct pci_dev *pdev;
2213 struct intel_iommu *iommu;
2219 * initialize and program root entry to not present
2222 for_each_drhd_unit(drhd) {
2225 * lock not needed as this is only incremented in the single
2226 * threaded kernel __init code path; all other accesses are read
2231 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2234 printk(KERN_ERR "Allocating global iommu array failed\n");
2239 deferred_flush = kzalloc(g_num_of_iommus *
2240 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2241 if (!deferred_flush) {
2246 for_each_drhd_unit(drhd) {
2250 iommu = drhd->iommu;
2251 g_iommus[iommu->seq_id] = iommu;
2253 ret = iommu_init_domains(iommu);
2259 * we could share the same root & context tables
2260 * among all IOMMUs. Need to split it later.
2262 ret = iommu_alloc_root_entry(iommu);
2264 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2267 if (!ecap_pass_through(iommu->ecap))
2268 hw_pass_through = 0;
2272 * Start from the sane iommu hardware state.
2274 for_each_drhd_unit(drhd) {
2278 iommu = drhd->iommu;
2281 * If the queued invalidation is already initialized by us
2282 * (for example, while enabling interrupt-remapping) then
2283 * we already have things rolling from a sane state.
2289 * Clear any previous faults.
2291 dmar_fault(-1, iommu);
2293 * Disable queued invalidation if supported and already enabled
2294 * before OS handover.
2296 dmar_disable_qi(iommu);
2299 for_each_drhd_unit(drhd) {
2303 iommu = drhd->iommu;
2305 if (dmar_enable_qi(iommu)) {
2307 * Queued Invalidation not enabled, use Register Based
2308 * Invalidation
2310 iommu->flush.flush_context = __iommu_flush_context;
2311 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2312 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
2314 (unsigned long long)drhd->reg_base_addr);
2316 iommu->flush.flush_context = qi_flush_context;
2317 iommu->flush.flush_iotlb = qi_flush_iotlb;
2318 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
2320 (unsigned long long)drhd->reg_base_addr);
2324 if (iommu_pass_through)
2325 iommu_identity_mapping = 1;
2326 #ifdef CONFIG_DMAR_BROKEN_GFX_WA
2328 iommu_identity_mapping = 2;
2331 * If pass through is not set or not enabled, set up context entries for
2332 * identity mappings for rmrr, gfx, and isa and may fall back to static
2333 * identity mapping if iommu_identity_mapping is set.
2335 if (iommu_identity_mapping) {
2336 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2338 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2344 * for each dev attached to rmrr
2346 * locate drhd for dev, alloc domain for dev
2347 * allocate free domain
2348 * allocate page table entries for rmrr
2349 * if context not allocated for bus
2350 * allocate and init context
2351 * set present in root table for this bus
2352 * init context with domain, translation etc
2356 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2357 for_each_rmrr_units(rmrr) {
2358 for (i = 0; i < rmrr->devices_cnt; i++) {
2359 pdev = rmrr->devices[i];
2361 * some BIOSes list nonexistent devices in DMAR
2366 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2369 "IOMMU: mapping reserved region failed\n");
2373 iommu_prepare_isa();
2378 * global invalidate context cache
2379 * global invalidate iotlb
2380 * enable translation
2382 for_each_drhd_unit(drhd) {
2385 iommu = drhd->iommu;
2387 iommu_flush_write_buffer(iommu);
2389 ret = dmar_set_interrupt(iommu);
2393 iommu_set_root_entry(iommu);
2395 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2396 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2397 iommu_disable_protect_mem_regions(iommu);
2399 ret = iommu_enable_translation(iommu);
2406 for_each_drhd_unit(drhd) {
2409 iommu = drhd->iommu;
2416 /* This takes a number of _MM_ pages, not VTD pages */
2417 static struct iova *intel_alloc_iova(struct device *dev,
2418 struct dmar_domain *domain,
2419 unsigned long nrpages, uint64_t dma_mask)
2421 struct pci_dev *pdev = to_pci_dev(dev);
2422 struct iova *iova = NULL;
2424 /* Restrict dma_mask to the width that the iommu can handle */
2425 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2427 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2429 * First try to allocate an io virtual address in
2430 * DMA_BIT_MASK(32) and if that fails then try allocating
2431 * from a higher range
2433 iova = alloc_iova(&domain->iovad, nrpages,
2434 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2438 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2439 if (unlikely(!iova)) {
2440 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2441 nrpages, pci_name(pdev));
2448 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2450 struct dmar_domain *domain;
2453 domain = get_domain_for_dev(pdev,
2454 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2457 "Allocating domain for %s failed", pci_name(pdev));
2461 /* make sure context mapping is ok */
2462 if (unlikely(!domain_context_mapped(pdev))) {
2463 ret = domain_context_mapping(domain, pdev,
2464 CONTEXT_TT_MULTI_LEVEL);
2467 "Domain context map for %s failed",
2476 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2478 struct device_domain_info *info;
2480 /* No lock here, assumes no domain exit in normal case */
2481 info = dev->dev.archdata.iommu;
2483 return info->domain;
2485 return __get_valid_domain_for_dev(dev);
2488 static int iommu_dummy(struct pci_dev *pdev)
2490 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2493 /* Check if the pdev needs to go through non-identity map and unmap process.*/
2494 static int iommu_no_mapping(struct device *dev)
2496 struct pci_dev *pdev;
2499 if (unlikely(dev->bus != &pci_bus_type))
2502 pdev = to_pci_dev(dev);
2503 if (iommu_dummy(pdev))
2506 if (!iommu_identity_mapping)
2509 found = identity_mapping(pdev);
2511 if (iommu_should_identity_map(pdev, 0))
2515 * A 32-bit DMA device is removed from si_domain and falls back
2516 * to non-identity mapping.
2518 domain_remove_one_dev_info(si_domain, pdev);
2519 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2525 * In case a 64-bit DMA device has been detached from a VM, the device
2526 * is put back into si_domain for identity mapping.
2528 if (iommu_should_identity_map(pdev, 0)) {
2530 ret = domain_add_dev_info(si_domain, pdev,
2532 CONTEXT_TT_PASS_THROUGH :
2533 CONTEXT_TT_MULTI_LEVEL);
2535 printk(KERN_INFO "64bit %s uses identity mapping\n",
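/*
 * Core single-range mapping path: devices covered by iommu_no_mapping()
 * bypass translation; otherwise an IOVA range is allocated, the page
 * table entries are written, and the IOTLB (in caching mode) or the
 * write buffer is flushed.
 */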
2545 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2546 size_t size, int dir, u64 dma_mask)
2548 struct pci_dev *pdev = to_pci_dev(hwdev);
2549 struct dmar_domain *domain;
2550 phys_addr_t start_paddr;
2554 struct intel_iommu *iommu;
2555 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2557 BUG_ON(dir == DMA_NONE);
2559 if (iommu_no_mapping(hwdev))
2562 domain = get_valid_domain_for_dev(pdev);
2566 iommu = domain_get_iommu(domain);
2567 size = aligned_nrpages(paddr, size);
2569 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2575 * Check if DMAR supports zero-length reads on write-only mappings
2578 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2579 !cap_zlr(iommu->cap))
2580 prot |= DMA_PTE_READ;
2581 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2582 prot |= DMA_PTE_WRITE;
2584 * The range paddr ... paddr + size might cover partial pages, so map the
2585 * whole page.  Note: if two parts of one page are mapped separately, two
2586 * guest addresses might map to the same host paddr, but this
2587 * is not a big problem
2589 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2590 mm_to_dma_pfn(paddr_pfn), size, prot);
2594 /* it's a non-present to present mapping. Only flush if caching mode */
2595 if (cap_caching_mode(iommu->cap))
2596 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
2598 iommu_flush_write_buffer(iommu);
2600 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2601 start_paddr += paddr & ~PAGE_MASK;
2606 __free_iova(&domain->iovad, iova);
2607 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
2608 pci_name(pdev), size, (unsigned long long)paddr, dir);
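/*
 * dma_map_ops .map_page hook.  A driver's dma_map_page()/dma_map_single()
 * call typically ends up here, e.g.
 *
 *	dma_addr_t h = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *
 * which resolves to page_to_phys(page) + offset and __intel_map_single().
 */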
2612 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2613 unsigned long offset, size_t size,
2614 enum dma_data_direction dir,
2615 struct dma_attrs *attrs)
2617 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2618 dir, to_pci_dev(dev)->dma_mask);
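/*
 * Deferred unmapping: freed IOVAs are queued per IOMMU and released in a
 * batch after a global IOTLB flush, triggered either by the unmap timer
 * or by the queue reaching HIGH_WATER_MARK.
 */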
2621 static void flush_unmaps(void)
2627 /* just flush them all */
2628 for (i = 0; i < g_num_of_iommus; i++) {
2629 struct intel_iommu *iommu = g_iommus[i];
2633 if (!deferred_flush[i].next)
2636 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2637 DMA_TLB_GLOBAL_FLUSH);
2638 for (j = 0; j < deferred_flush[i].next; j++) {
2640 struct iova *iova = deferred_flush[i].iova[j];
2642 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2643 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2644 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2645 iova->pfn_lo << PAGE_SHIFT, mask);
2646 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2648 deferred_flush[i].next = 0;
2654 static void flush_unmaps_timeout(unsigned long data)
2656 unsigned long flags;
2658 spin_lock_irqsave(&async_umap_flush_lock, flags);
2660 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2663 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2665 unsigned long flags;
2667 struct intel_iommu *iommu;
2669 spin_lock_irqsave(&async_umap_flush_lock, flags);
2670 if (list_size == HIGH_WATER_MARK)
2673 iommu = domain_get_iommu(dom);
2674 iommu_id = iommu->seq_id;
2676 next = deferred_flush[iommu_id].next;
2677 deferred_flush[iommu_id].domain[next] = dom;
2678 deferred_flush[iommu_id].iova[next] = iova;
2679 deferred_flush[iommu_id].next++;
2682 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2686 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
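/*
 * Unmap a single range: clear the PTEs and page tables covering the IOVA,
 * then flush the IOTLB immediately (intel_iommu_strict) or defer the IOVA
 * release via add_unmap().
 */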
2689 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2690 size_t size, enum dma_data_direction dir,
2691 struct dma_attrs *attrs)
2693 struct pci_dev *pdev = to_pci_dev(dev);
2694 struct dmar_domain *domain;
2695 unsigned long start_pfn, last_pfn;
2697 struct intel_iommu *iommu;
2699 if (iommu_no_mapping(dev))
2702 domain = find_domain(pdev);
2705 iommu = domain_get_iommu(domain);
2707 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2708 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2709 (unsigned long long)dev_addr))
2712 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2713 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2715 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2716 pci_name(pdev), start_pfn, last_pfn);
2718 /* clear the whole page */
2719 dma_pte_clear_range(domain, start_pfn, last_pfn);
2721 /* free page tables */
2722 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2724 if (intel_iommu_strict) {
2725 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2726 last_pfn - start_pfn + 1);
2728 __free_iova(&domain->iovad, iova);
2730 add_unmap(domain, iova);
2732 * Queue up the release of the unmap to save roughly 1/6th of the
2733 * CPU time that an immediate iotlb flush operation would use...
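/*
 * Coherent allocations are ordinary page allocations mapped through
 * __intel_map_single() against the device's coherent DMA mask.
 */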
2738 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2739 dma_addr_t *dma_handle, gfp_t flags)
2744 size = PAGE_ALIGN(size);
2745 order = get_order(size);
2746 flags &= ~(GFP_DMA | GFP_DMA32);
2748 vaddr = (void *)__get_free_pages(flags, order);
2751 memset(vaddr, 0, size);
2753 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2755 hwdev->coherent_dma_mask);
2758 free_pages((unsigned long)vaddr, order);
2762 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2763 dma_addr_t dma_handle)
2767 size = PAGE_ALIGN(size);
2768 order = get_order(size);
2770 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2771 free_pages((unsigned long)vaddr, order);
2774 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2775 int nelems, enum dma_data_direction dir,
2776 struct dma_attrs *attrs)
2778 struct pci_dev *pdev = to_pci_dev(hwdev);
2779 struct dmar_domain *domain;
2780 unsigned long start_pfn, last_pfn;
2782 struct intel_iommu *iommu;
2784 if (iommu_no_mapping(hwdev))
2787 domain = find_domain(pdev);
2790 iommu = domain_get_iommu(domain);
2792 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2793 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2794 (unsigned long long)sglist[0].dma_address))
2797 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2798 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2800 /* clear the whole page */
2801 dma_pte_clear_range(domain, start_pfn, last_pfn);
2803 /* free page tables */
2804 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2806 if (intel_iommu_strict) {
2807 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2808 last_pfn - start_pfn + 1);
2810 __free_iova(&domain->iovad, iova);
2812 add_unmap(domain, iova);
2814 * Queue up the release of the unmap to save roughly 1/6th of the
2815 * CPU time that an immediate iotlb flush operation would use...
2820 static int intel_nontranslate_map_sg(struct device *hddev,
2821 struct scatterlist *sglist, int nelems, int dir)
2824 struct scatterlist *sg;
2826 for_each_sg(sglist, sg, nelems, i) {
2827 BUG_ON(!sg_page(sg));
2828 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2829 sg->dma_length = sg->length;
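/*
 * Map a scatterlist: a single IOVA range covers the whole list, so the
 * elements become contiguous in DMA address space even when they are
 * scattered in physical memory.
 */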
2834 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2835 enum dma_data_direction dir, struct dma_attrs *attrs)
2838 struct pci_dev *pdev = to_pci_dev(hwdev);
2839 struct dmar_domain *domain;
2842 size_t offset_pfn = 0;
2843 struct iova *iova = NULL;
2845 struct scatterlist *sg;
2846 unsigned long start_vpfn;
2847 struct intel_iommu *iommu;
2849 BUG_ON(dir == DMA_NONE);
2850 if (iommu_no_mapping(hwdev))
2851 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2853 domain = get_valid_domain_for_dev(pdev);
2857 iommu = domain_get_iommu(domain);
2859 for_each_sg(sglist, sg, nelems, i)
2860 size += aligned_nrpages(sg->offset, sg->length);
2862 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2865 sglist->dma_length = 0;
2870 * Check if DMAR supports zero-length reads on write-only mappings
2873 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2874 !cap_zlr(iommu->cap))
2875 prot |= DMA_PTE_READ;
2876 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2877 prot |= DMA_PTE_WRITE;
2879 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
2881 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
2882 if (unlikely(ret)) {
2883 /* clear the page */
2884 dma_pte_clear_range(domain, start_vpfn,
2885 start_vpfn + size - 1);
2886 /* free page tables */
2887 dma_pte_free_pagetable(domain, start_vpfn,
2888 start_vpfn + size - 1);
2890 __free_iova(&domain->iovad, iova);
2894 /* it's a non-present to present mapping. Only flush if caching mode */
2895 if (cap_caching_mode(iommu->cap))
2896 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
2898 iommu_flush_write_buffer(iommu);
2903 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2908 struct dma_map_ops intel_dma_ops = {
2909 .alloc_coherent = intel_alloc_coherent,
2910 .free_coherent = intel_free_coherent,
2911 .map_sg = intel_map_sg,
2912 .unmap_sg = intel_unmap_sg,
2913 .map_page = intel_map_page,
2914 .unmap_page = intel_unmap_page,
2915 .mapping_error = intel_mapping_error,
2918 static inline int iommu_domain_cache_init(void)
2922 iommu_domain_cache = kmem_cache_create("iommu_domain",
2923 sizeof(struct dmar_domain),
2928 if (!iommu_domain_cache) {
2929 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2936 static inline int iommu_devinfo_cache_init(void)
2940 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2941 sizeof(struct device_domain_info),
2945 if (!iommu_devinfo_cache) {
2946 printk(KERN_ERR "Couldn't create devinfo cache\n");
2953 static inline int iommu_iova_cache_init(void)
2957 iommu_iova_cache = kmem_cache_create("iommu_iova",
2958 sizeof(struct iova),
2962 if (!iommu_iova_cache) {
2963 printk(KERN_ERR "Couldn't create iova cache\n");
2970 static int __init iommu_init_mempool(void)
2973 ret = iommu_iova_cache_init();
2977 ret = iommu_domain_cache_init();
2981 ret = iommu_devinfo_cache_init();
2985 kmem_cache_destroy(iommu_domain_cache);
2987 kmem_cache_destroy(iommu_iova_cache);
2992 static void __init iommu_exit_mempool(void)
2994 kmem_cache_destroy(iommu_devinfo_cache);
2995 kmem_cache_destroy(iommu_domain_cache);
2996 kmem_cache_destroy(iommu_iova_cache);
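/*
 * Scan the DRHD units before enabling DMA remapping: units with no
 * devices are ignored, and units that cover only graphics devices are
 * bypassed by marking their devices with DUMMY_DEVICE_DOMAIN_INFO.
 */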
3000 static void __init init_no_remapping_devices(void)
3002 struct dmar_drhd_unit *drhd;
3004 for_each_drhd_unit(drhd) {
3005 if (!drhd->include_all) {
3007 for (i = 0; i < drhd->devices_cnt; i++)
3008 if (drhd->devices[i] != NULL)
3010 /* ignore DMAR unit if no pci devices exist */
3011 if (i == drhd->devices_cnt)
3019 for_each_drhd_unit(drhd) {
3021 if (drhd->ignored || drhd->include_all)
3024 for (i = 0; i < drhd->devices_cnt; i++)
3025 if (drhd->devices[i] &&
3026 !IS_GFX_DEVICE(drhd->devices[i]))
3029 if (i < drhd->devices_cnt)
3032 /* bypass IOMMU if it is just for gfx devices */
3034 for (i = 0; i < drhd->devices_cnt; i++) {
3035 if (!drhd->devices[i])
3037 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
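/*
 * Suspend/resume support: translation is re-established by init_iommu_hw(),
 * while the fault-event registers are saved on suspend and restored on
 * resume.
 */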
3042 #ifdef CONFIG_SUSPEND
3043 static int init_iommu_hw(void)
3045 struct dmar_drhd_unit *drhd;
3046 struct intel_iommu *iommu = NULL;
3048 for_each_active_iommu(iommu, drhd)
3050 dmar_reenable_qi(iommu);
3052 for_each_active_iommu(iommu, drhd) {
3053 iommu_flush_write_buffer(iommu);
3055 iommu_set_root_entry(iommu);
3057 iommu->flush.flush_context(iommu, 0, 0, 0,
3058 DMA_CCMD_GLOBAL_INVL);
3059 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3060 DMA_TLB_GLOBAL_FLUSH);
3061 iommu_disable_protect_mem_regions(iommu);
3062 iommu_enable_translation(iommu);
3068 static void iommu_flush_all(void)
3070 struct dmar_drhd_unit *drhd;
3071 struct intel_iommu *iommu;
3073 for_each_active_iommu(iommu, drhd) {
3074 iommu->flush.flush_context(iommu, 0, 0, 0,
3075 DMA_CCMD_GLOBAL_INVL);
3076 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3077 DMA_TLB_GLOBAL_FLUSH);
3081 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3083 struct dmar_drhd_unit *drhd;
3084 struct intel_iommu *iommu = NULL;
3087 for_each_active_iommu(iommu, drhd) {
3088 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3090 if (!iommu->iommu_state)
3096 for_each_active_iommu(iommu, drhd) {
3097 iommu_disable_translation(iommu);
3099 spin_lock_irqsave(&iommu->register_lock, flag);
3101 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3102 readl(iommu->reg + DMAR_FECTL_REG);
3103 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3104 readl(iommu->reg + DMAR_FEDATA_REG);
3105 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3106 readl(iommu->reg + DMAR_FEADDR_REG);
3107 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3108 readl(iommu->reg + DMAR_FEUADDR_REG);
3110 spin_unlock_irqrestore(&iommu->register_lock, flag);
3115 for_each_active_iommu(iommu, drhd)
3116 kfree(iommu->iommu_state);
3121 static int iommu_resume(struct sys_device *dev)
3123 struct dmar_drhd_unit *drhd;
3124 struct intel_iommu *iommu = NULL;
3127 if (init_iommu_hw()) {
3128 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3132 for_each_active_iommu(iommu, drhd) {
3134 spin_lock_irqsave(&iommu->register_lock, flag);
3136 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3137 iommu->reg + DMAR_FECTL_REG);
3138 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3139 iommu->reg + DMAR_FEDATA_REG);
3140 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3141 iommu->reg + DMAR_FEADDR_REG);
3142 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3143 iommu->reg + DMAR_FEUADDR_REG);
3145 spin_unlock_irqrestore(&iommu->register_lock, flag);
3148 for_each_active_iommu(iommu, drhd)
3149 kfree(iommu->iommu_state);
3154 static struct sysdev_class iommu_sysclass = {
3156 .resume = iommu_resume,
3157 .suspend = iommu_suspend,
3160 static struct sys_device device_iommu = {
3161 .cls = &iommu_sysclass,
3164 static int __init init_iommu_sysfs(void)
3168 error = sysdev_class_register(&iommu_sysclass);
3172 error = sysdev_register(&device_iommu);
3174 sysdev_class_unregister(&iommu_sysclass);
3180 static int __init init_iommu_sysfs(void)
3184 #endif /* CONFIG_SUSPEND */
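/*
 * Main entry point: parse the DMAR table, set up the mempools and
 * reserved IOVA ranges, initialize the DMAR units, then install
 * intel_dma_ops and register the IOMMU-API ops.
 */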
3186 int __init intel_iommu_init(void)
3190 if (dmar_table_init())
3193 if (dmar_dev_scope_init())
3197 * Check the need for DMA-remapping initialization now.
3198 * The initialization above is also used by interrupt remapping.
3200 if (no_iommu || swiotlb || dmar_disabled)
3203 iommu_init_mempool();
3204 dmar_init_reserved_ranges();
3206 init_no_remapping_devices();
3210 printk(KERN_ERR "IOMMU: dmar init failed\n");
3211 put_iova_domain(&reserved_iova_list);
3212 iommu_exit_mempool();
3216 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3218 init_timer(&unmap_timer);
3220 dma_ops = &intel_dma_ops;
3224 register_iommu(&intel_iommu_ops);
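/*
 * A device behind a PCIe-to-PCI(-X) bridge also has context entries
 * programmed for the bridge and any intermediate buses, so a detach has
 * to tear those down as well.
 */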
3229 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3230 struct pci_dev *pdev)
3232 struct pci_dev *tmp, *parent;
3234 if (!iommu || !pdev)
3237 /* dependent device detach */
3238 tmp = pci_find_upstream_pcie_bridge(pdev);
3239 /* Secondary interface's bus number and devfn 0 */
3241 parent = pdev->bus->self;
3242 while (parent != tmp) {
3243 iommu_detach_dev(iommu, parent->bus->number,
3245 parent = parent->bus->self;
3247 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3248 iommu_detach_dev(iommu,
3249 tmp->subordinate->number, 0);
3250 else /* this is a legacy PCI bridge */
3251 iommu_detach_dev(iommu, tmp->bus->number,
3256 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3257 struct pci_dev *pdev)
3259 struct device_domain_info *info;
3260 struct intel_iommu *iommu;
3261 unsigned long flags;
3263 struct list_head *entry, *tmp;
3265 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3270 spin_lock_irqsave(&device_domain_lock, flags);
3271 list_for_each_safe(entry, tmp, &domain->devices) {
3272 info = list_entry(entry, struct device_domain_info, link);
3273 /* No need to compare PCI domain; it has to be the same */
3274 if (info->bus == pdev->bus->number &&
3275 info->devfn == pdev->devfn) {
3276 list_del(&info->link);
3277 list_del(&info->global);
3279 info->dev->dev.archdata.iommu = NULL;
3280 spin_unlock_irqrestore(&device_domain_lock, flags);
3282 iommu_disable_dev_iotlb(info);
3283 iommu_detach_dev(iommu, info->bus, info->devfn);
3284 iommu_detach_dependent_devices(iommu, pdev);
3285 free_devinfo_mem(info);
3287 spin_lock_irqsave(&device_domain_lock, flags);
3295 /* if there are no other devices under the same iommu
3296 * owned by this domain, clear this iommu in iommu_bmp,
3297 * and update the iommu count and coherency
3299 if (iommu == device_to_iommu(info->segment, info->bus,
3305 unsigned long tmp_flags;
3306 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3307 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3308 domain->iommu_count--;
3309 domain_update_iommu_cap(domain);
3310 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3313 spin_unlock_irqrestore(&device_domain_lock, flags);
3316 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3318 struct device_domain_info *info;
3319 struct intel_iommu *iommu;
3320 unsigned long flags1, flags2;
3322 spin_lock_irqsave(&device_domain_lock, flags1);
3323 while (!list_empty(&domain->devices)) {
3324 info = list_entry(domain->devices.next,
3325 struct device_domain_info, link);
3326 list_del(&info->link);
3327 list_del(&info->global);
3329 info->dev->dev.archdata.iommu = NULL;
3331 spin_unlock_irqrestore(&device_domain_lock, flags1);
3333 iommu_disable_dev_iotlb(info);
3334 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3335 iommu_detach_dev(iommu, info->bus, info->devfn);
3336 iommu_detach_dependent_devices(iommu, info->dev);
3338 /* clear this iommu in iommu_bmp, update iommu count and coherency
3341 spin_lock_irqsave(&domain->iommu_lock, flags2);
3342 if (test_and_clear_bit(iommu->seq_id,
3343 &domain->iommu_bmp)) {
3344 domain->iommu_count--;
3345 domain_update_iommu_cap(domain);
3347 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3349 free_devinfo_mem(info);
3350 spin_lock_irqsave(&device_domain_lock, flags1);
3352 spin_unlock_irqrestore(&device_domain_lock, flags1);
3355 /* domain id for virtual machine, it won't be set in context */
3356 static unsigned long vm_domid;
3358 static int vm_domain_min_agaw(struct dmar_domain *domain)
3361 int min_agaw = domain->agaw;
3363 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3364 for (; i < g_num_of_iommus; ) {
3365 if (min_agaw > g_iommus[i]->agaw)
3366 min_agaw = g_iommus[i]->agaw;
3368 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3374 static struct dmar_domain *iommu_alloc_vm_domain(void)
3376 struct dmar_domain *domain;
3378 domain = alloc_domain_mem();
3382 domain->id = vm_domid++;
3383 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3384 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3389 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3393 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3394 spin_lock_init(&domain->iommu_lock);
3396 domain_reserve_special_ranges(domain);
3398 /* calculate AGAW */
3399 domain->gaw = guest_width;
3400 adjust_width = guestwidth_to_adjustwidth(guest_width);
3401 domain->agaw = width_to_agaw(adjust_width);
3403 INIT_LIST_HEAD(&domain->devices);
3405 domain->iommu_count = 0;
3406 domain->iommu_coherency = 0;
3407 domain->iommu_snooping = 0;
3408 domain->max_addr = 0;
3410 /* always allocate the top pgd */
3411 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3414 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3418 static void iommu_free_vm_domain(struct dmar_domain *domain)
3420 unsigned long flags;
3421 struct dmar_drhd_unit *drhd;
3422 struct intel_iommu *iommu;
3424 unsigned long ndomains;
3426 for_each_drhd_unit(drhd) {
3429 iommu = drhd->iommu;
3431 ndomains = cap_ndoms(iommu->cap);
3432 i = find_first_bit(iommu->domain_ids, ndomains);
3433 for (; i < ndomains; ) {
3434 if (iommu->domains[i] == domain) {
3435 spin_lock_irqsave(&iommu->lock, flags);
3436 clear_bit(i, iommu->domain_ids);
3437 iommu->domains[i] = NULL;
3438 spin_unlock_irqrestore(&iommu->lock, flags);
3441 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3446 static void vm_domain_exit(struct dmar_domain *domain)
3448 /* Domain 0 is reserved, so don't process it */
3452 vm_domain_remove_all_dev_info(domain);
3454 put_iova_domain(&domain->iovad);
3457 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3459 /* free page tables */
3460 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3462 iommu_free_vm_domain(domain);
3463 free_domain_mem(domain);
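/*
 * Generic IOMMU-API (struct iommu_ops) entry points, used for device
 * assignment (e.g. to KVM guests).  They operate on the VM domains
 * allocated above.
 */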
3466 static int intel_iommu_domain_init(struct iommu_domain *domain)
3468 struct dmar_domain *dmar_domain;
3470 dmar_domain = iommu_alloc_vm_domain();
3473 "intel_iommu_domain_init: dmar_domain == NULL\n");
3476 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3478 "intel_iommu_domain_init() failed\n");
3479 vm_domain_exit(dmar_domain);
3482 domain->priv = dmar_domain;
3487 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3489 struct dmar_domain *dmar_domain = domain->priv;
3491 domain->priv = NULL;
3492 vm_domain_exit(dmar_domain);
3495 static int intel_iommu_attach_device(struct iommu_domain *domain,
3498 struct dmar_domain *dmar_domain = domain->priv;
3499 struct pci_dev *pdev = to_pci_dev(dev);
3500 struct intel_iommu *iommu;
3504 /* normally pdev is not mapped */
3505 if (unlikely(domain_context_mapped(pdev))) {
3506 struct dmar_domain *old_domain;
3508 old_domain = find_domain(pdev);
3510 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3511 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3512 domain_remove_one_dev_info(old_domain, pdev);
3514 domain_remove_dev_info(old_domain);
3518 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3523 /* check if this iommu agaw is sufficient for max mapped address */
3524 addr_width = agaw_to_width(iommu->agaw);
3525 end = DOMAIN_MAX_ADDR(addr_width);
3526 end = end & VTD_PAGE_MASK;
3527 if (end < dmar_domain->max_addr) {
3528 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3529 "sufficient for the mapped address (%llx)\n",
3530 __func__, iommu->agaw, dmar_domain->max_addr);
3534 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3537 static void intel_iommu_detach_device(struct iommu_domain *domain,
3540 struct dmar_domain *dmar_domain = domain->priv;
3541 struct pci_dev *pdev = to_pci_dev(dev);
3543 domain_remove_one_dev_info(dmar_domain, pdev);
3546 static int intel_iommu_map_range(struct iommu_domain *domain,
3547 unsigned long iova, phys_addr_t hpa,
3548 size_t size, int iommu_prot)
3550 struct dmar_domain *dmar_domain = domain->priv;
3556 if (iommu_prot & IOMMU_READ)
3557 prot |= DMA_PTE_READ;
3558 if (iommu_prot & IOMMU_WRITE)
3559 prot |= DMA_PTE_WRITE;
3560 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3561 prot |= DMA_PTE_SNP;
3563 max_addr = iova + size;
3564 if (dmar_domain->max_addr < max_addr) {
3568 /* check if minimum agaw is sufficient for mapped address */
3569 min_agaw = vm_domain_min_agaw(dmar_domain);
3570 addr_width = agaw_to_width(min_agaw);
3571 end = DOMAIN_MAX_ADDR(addr_width);
3572 end = end & VTD_PAGE_MASK;
3573 if (end < max_addr) {
3574 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3575 "sufficient for the mapped address (%llx)\n",
3576 __func__, min_agaw, max_addr);
3579 dmar_domain->max_addr = max_addr;
3581 /* Round up size to next multiple of PAGE_SIZE, if it and
3582 the low bits of hpa would take us onto the next page */
3583 size = aligned_nrpages(hpa, size);
3584 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3585 hpa >> VTD_PAGE_SHIFT, size, prot);
3589 static void intel_iommu_unmap_range(struct iommu_domain *domain,
3590 unsigned long iova, size_t size)
3592 struct dmar_domain *dmar_domain = domain->priv;
3597 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3598 (iova + size - 1) >> VTD_PAGE_SHIFT);
3600 if (dmar_domain->max_addr == iova + size)
3601 dmar_domain->max_addr = iova;
3604 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3607 struct dmar_domain *dmar_domain = domain->priv;
3608 struct dma_pte *pte;
3611 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
3613 phys = dma_pte_addr(pte);
3618 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3621 struct dmar_domain *dmar_domain = domain->priv;
3623 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3624 return dmar_domain->iommu_snooping;
3629 static struct iommu_ops intel_iommu_ops = {
3630 .domain_init = intel_iommu_domain_init,
3631 .domain_destroy = intel_iommu_domain_destroy,
3632 .attach_dev = intel_iommu_attach_device,
3633 .detach_dev = intel_iommu_detach_device,
3634 .map = intel_iommu_map_range,
3635 .unmap = intel_iommu_unmap_range,
3636 .iova_to_phys = intel_iommu_iova_to_phys,
3637 .domain_has_cap = intel_iommu_domain_has_cap,
3640 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3643 * Mobile 4 Series Chipset neglects to set the RWBF capability, but needs it
3646 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3650 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);