Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
[cascardo/linux.git] / drivers / iommu / intel-iommu.c
1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  */
19
20 #include <linux/init.h>
21 #include <linux/bitmap.h>
22 #include <linux/debugfs.h>
23 #include <linux/export.h>
24 #include <linux/slab.h>
25 #include <linux/irq.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/pci.h>
29 #include <linux/dmar.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/mempool.h>
32 #include <linux/memory.h>
33 #include <linux/timer.h>
34 #include <linux/iova.h>
35 #include <linux/iommu.h>
36 #include <linux/intel-iommu.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/pci-ats.h>
41 #include <linux/memblock.h>
42 #include <linux/dma-contiguous.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/cacheflush.h>
45 #include <asm/iommu.h>
46
47 #include "irq_remapping.h"
48
49 #define ROOT_SIZE               VTD_PAGE_SIZE
50 #define CONTEXT_SIZE            VTD_PAGE_SIZE
51
52 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
54 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
55
56 #define IOAPIC_RANGE_START      (0xfee00000)
57 #define IOAPIC_RANGE_END        (0xfeefffff)
58 #define IOVA_START_ADDR         (0x1000)
59
60 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
61
62 #define MAX_AGAW_WIDTH 64
63 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
64
65 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
66 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
67
68 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
69    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
70 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
71                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
72 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
73
74 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
75 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
76 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
77
78 /* page table handling */
79 #define LEVEL_STRIDE            (9)
80 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
81
82 /*
83  * This bitmap is used to advertise the page sizes our hardware support
84  * to the IOMMU core, which will then use this information to split
85  * physically contiguous memory regions it is mapping into page sizes
86  * that we support.
87  *
88  * Traditionally the IOMMU core just handed us the mappings directly,
89  * after making sure the size is an order of a 4KiB page and that the
90  * mapping has natural alignment.
91  *
92  * To retain this behavior, we currently advertise that we support
93  * all page sizes that are an order of 4KiB.
94  *
95  * If at some point we'd like to utilize the IOMMU core's new behavior,
96  * we could change this to advertise the real page sizes we support.
97  */
98 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
99
100 static inline int agaw_to_level(int agaw)
101 {
102         return agaw + 2;
103 }
104
105 static inline int agaw_to_width(int agaw)
106 {
107         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
108 }
109
110 static inline int width_to_agaw(int width)
111 {
112         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
113 }
114
115 static inline unsigned int level_to_offset_bits(int level)
116 {
117         return (level - 1) * LEVEL_STRIDE;
118 }
119
120 static inline int pfn_level_offset(unsigned long pfn, int level)
121 {
122         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
123 }
124
125 static inline unsigned long level_mask(int level)
126 {
127         return -1UL << level_to_offset_bits(level);
128 }
129
130 static inline unsigned long level_size(int level)
131 {
132         return 1UL << level_to_offset_bits(level);
133 }
134
135 static inline unsigned long align_to_level(unsigned long pfn, int level)
136 {
137         return (pfn + level_size(level) - 1) & level_mask(level);
138 }
139
140 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
141 {
142         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
143 }
144
145 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
146    are never going to work. */
147 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
148 {
149         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
150 }
151
152 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
153 {
154         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
155 }
156 static inline unsigned long page_to_dma_pfn(struct page *pg)
157 {
158         return mm_to_dma_pfn(page_to_pfn(pg));
159 }
160 static inline unsigned long virt_to_dma_pfn(void *p)
161 {
162         return page_to_dma_pfn(virt_to_page(p));
163 }
164
165 /* global iommu list, set NULL for ignored DMAR units */
166 static struct intel_iommu **g_iommus;
167
168 static void __init check_tylersburg_isoch(void);
169 static int rwbf_quirk;
170
171 /*
172  * set to 1 to panic kernel if can't successfully enable VT-d
173  * (used when kernel is launched w/ TXT)
174  */
175 static int force_on = 0;
176
177 /*
178  * 0: Present
179  * 1-11: Reserved
180  * 12-63: Context Ptr (12 - (haw-1))
181  * 64-127: Reserved
182  */
183 struct root_entry {
184         u64     val;
185         u64     rsvd1;
186 };
187 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
188 static inline bool root_present(struct root_entry *root)
189 {
190         return (root->val & 1);
191 }
192 static inline void set_root_present(struct root_entry *root)
193 {
194         root->val |= 1;
195 }
196 static inline void set_root_value(struct root_entry *root, unsigned long value)
197 {
198         root->val |= value & VTD_PAGE_MASK;
199 }
200
201 static inline struct context_entry *
202 get_context_addr_from_root(struct root_entry *root)
203 {
204         return (struct context_entry *)
205                 (root_present(root)?phys_to_virt(
206                 root->val & VTD_PAGE_MASK) :
207                 NULL);
208 }
209
210 /*
211  * low 64 bits:
212  * 0: present
213  * 1: fault processing disable
214  * 2-3: translation type
215  * 12-63: address space root
216  * high 64 bits:
217  * 0-2: address width
218  * 3-6: aval
219  * 8-23: domain id
220  */
221 struct context_entry {
222         u64 lo;
223         u64 hi;
224 };
225
226 static inline bool context_present(struct context_entry *context)
227 {
228         return (context->lo & 1);
229 }
230 static inline void context_set_present(struct context_entry *context)
231 {
232         context->lo |= 1;
233 }
234
235 static inline void context_set_fault_enable(struct context_entry *context)
236 {
237         context->lo &= (((u64)-1) << 2) | 1;
238 }
239
240 static inline void context_set_translation_type(struct context_entry *context,
241                                                 unsigned long value)
242 {
243         context->lo &= (((u64)-1) << 4) | 3;
244         context->lo |= (value & 3) << 2;
245 }
246
247 static inline void context_set_address_root(struct context_entry *context,
248                                             unsigned long value)
249 {
250         context->lo |= value & VTD_PAGE_MASK;
251 }
252
253 static inline void context_set_address_width(struct context_entry *context,
254                                              unsigned long value)
255 {
256         context->hi |= value & 7;
257 }
258
259 static inline void context_set_domain_id(struct context_entry *context,
260                                          unsigned long value)
261 {
262         context->hi |= (value & ((1 << 16) - 1)) << 8;
263 }
264
265 static inline void context_clear_entry(struct context_entry *context)
266 {
267         context->lo = 0;
268         context->hi = 0;
269 }
270
271 /*
272  * 0: readable
273  * 1: writable
274  * 2-6: reserved
275  * 7: super page
276  * 8-10: available
277  * 11: snoop behavior
278  * 12-63: Host physcial address
279  */
280 struct dma_pte {
281         u64 val;
282 };
283
284 static inline void dma_clear_pte(struct dma_pte *pte)
285 {
286         pte->val = 0;
287 }
288
289 static inline u64 dma_pte_addr(struct dma_pte *pte)
290 {
291 #ifdef CONFIG_64BIT
292         return pte->val & VTD_PAGE_MASK;
293 #else
294         /* Must have a full atomic 64-bit read */
295         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
296 #endif
297 }
298
299 static inline bool dma_pte_present(struct dma_pte *pte)
300 {
301         return (pte->val & 3) != 0;
302 }
303
304 static inline bool dma_pte_superpage(struct dma_pte *pte)
305 {
306         return (pte->val & DMA_PTE_LARGE_PAGE);
307 }
308
309 static inline int first_pte_in_page(struct dma_pte *pte)
310 {
311         return !((unsigned long)pte & ~VTD_PAGE_MASK);
312 }
313
314 /*
315  * This domain is a statically identity mapping domain.
316  *      1. This domain creats a static 1:1 mapping to all usable memory.
317  *      2. It maps to each iommu if successful.
318  *      3. Each iommu mapps to this domain if successful.
319  */
320 static struct dmar_domain *si_domain;
321 static int hw_pass_through = 1;
322
323 /* domain represents a virtual machine, more than one devices
324  * across iommus may be owned in one domain, e.g. kvm guest.
325  */
326 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
327
328 /* si_domain contains mulitple devices */
329 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
330
331 /* define the limit of IOMMUs supported in each domain */
332 #ifdef  CONFIG_X86
333 # define        IOMMU_UNITS_SUPPORTED   MAX_IO_APICS
334 #else
335 # define        IOMMU_UNITS_SUPPORTED   64
336 #endif
337
338 struct dmar_domain {
339         int     id;                     /* domain id */
340         int     nid;                    /* node id */
341         DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
342                                         /* bitmap of iommus this domain uses*/
343
344         struct list_head devices;       /* all devices' list */
345         struct iova_domain iovad;       /* iova's that belong to this domain */
346
347         struct dma_pte  *pgd;           /* virtual address */
348         int             gaw;            /* max guest address width */
349
350         /* adjusted guest address width, 0 is level 2 30-bit */
351         int             agaw;
352
353         int             flags;          /* flags to find out type of domain */
354
355         int             iommu_coherency;/* indicate coherency of iommu access */
356         int             iommu_snooping; /* indicate snooping control feature*/
357         int             iommu_count;    /* reference count of iommu */
358         int             iommu_superpage;/* Level of superpages supported:
359                                            0 == 4KiB (no superpages), 1 == 2MiB,
360                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
361         spinlock_t      iommu_lock;     /* protect iommu set in domain */
362         u64             max_addr;       /* maximum mapped address */
363 };
364
365 /* PCI domain-device relationship */
366 struct device_domain_info {
367         struct list_head link;  /* link to domain siblings */
368         struct list_head global; /* link to global list */
369         u8 bus;                 /* PCI bus number */
370         u8 devfn;               /* PCI devfn number */
371         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
372         struct intel_iommu *iommu; /* IOMMU used by this device */
373         struct dmar_domain *domain; /* pointer to domain */
374 };
375
376 struct dmar_rmrr_unit {
377         struct list_head list;          /* list of rmrr units   */
378         struct acpi_dmar_header *hdr;   /* ACPI header          */
379         u64     base_address;           /* reserved base address*/
380         u64     end_address;            /* reserved end address */
381         struct dmar_dev_scope *devices; /* target devices */
382         int     devices_cnt;            /* target device count */
383 };
384
385 struct dmar_atsr_unit {
386         struct list_head list;          /* list of ATSR units */
387         struct acpi_dmar_header *hdr;   /* ACPI header */
388         struct dmar_dev_scope *devices; /* target devices */
389         int devices_cnt;                /* target device count */
390         u8 include_all:1;               /* include all ports */
391 };
392
393 static LIST_HEAD(dmar_atsr_units);
394 static LIST_HEAD(dmar_rmrr_units);
395
396 #define for_each_rmrr_units(rmrr) \
397         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
398
399 static void flush_unmaps_timeout(unsigned long data);
400
401 static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
402
403 #define HIGH_WATER_MARK 250
404 struct deferred_flush_tables {
405         int next;
406         struct iova *iova[HIGH_WATER_MARK];
407         struct dmar_domain *domain[HIGH_WATER_MARK];
408         struct page *freelist[HIGH_WATER_MARK];
409 };
410
411 static struct deferred_flush_tables *deferred_flush;
412
413 /* bitmap for indexing intel_iommus */
414 static int g_num_of_iommus;
415
416 static DEFINE_SPINLOCK(async_umap_flush_lock);
417 static LIST_HEAD(unmaps_to_do);
418
419 static int timer_on;
420 static long list_size;
421
422 static void domain_exit(struct dmar_domain *domain);
423 static void domain_remove_dev_info(struct dmar_domain *domain);
424 static void domain_remove_one_dev_info(struct dmar_domain *domain,
425                                        struct device *dev);
426 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
427                                            struct device *dev);
428 static int domain_detach_iommu(struct dmar_domain *domain,
429                                struct intel_iommu *iommu);
430
431 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
432 int dmar_disabled = 0;
433 #else
434 int dmar_disabled = 1;
435 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
436
437 int intel_iommu_enabled = 0;
438 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
439
440 static int dmar_map_gfx = 1;
441 static int dmar_forcedac;
442 static int intel_iommu_strict;
443 static int intel_iommu_superpage = 1;
444
445 int intel_iommu_gfx_mapped;
446 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
447
448 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
449 static DEFINE_SPINLOCK(device_domain_lock);
450 static LIST_HEAD(device_domain_list);
451
452 static const struct iommu_ops intel_iommu_ops;
453
454 static int __init intel_iommu_setup(char *str)
455 {
456         if (!str)
457                 return -EINVAL;
458         while (*str) {
459                 if (!strncmp(str, "on", 2)) {
460                         dmar_disabled = 0;
461                         printk(KERN_INFO "Intel-IOMMU: enabled\n");
462                 } else if (!strncmp(str, "off", 3)) {
463                         dmar_disabled = 1;
464                         printk(KERN_INFO "Intel-IOMMU: disabled\n");
465                 } else if (!strncmp(str, "igfx_off", 8)) {
466                         dmar_map_gfx = 0;
467                         printk(KERN_INFO
468                                 "Intel-IOMMU: disable GFX device mapping\n");
469                 } else if (!strncmp(str, "forcedac", 8)) {
470                         printk(KERN_INFO
471                                 "Intel-IOMMU: Forcing DAC for PCI devices\n");
472                         dmar_forcedac = 1;
473                 } else if (!strncmp(str, "strict", 6)) {
474                         printk(KERN_INFO
475                                 "Intel-IOMMU: disable batched IOTLB flush\n");
476                         intel_iommu_strict = 1;
477                 } else if (!strncmp(str, "sp_off", 6)) {
478                         printk(KERN_INFO
479                                 "Intel-IOMMU: disable supported super page\n");
480                         intel_iommu_superpage = 0;
481                 }
482
483                 str += strcspn(str, ",");
484                 while (*str == ',')
485                         str++;
486         }
487         return 0;
488 }
489 __setup("intel_iommu=", intel_iommu_setup);
490
491 static struct kmem_cache *iommu_domain_cache;
492 static struct kmem_cache *iommu_devinfo_cache;
493 static struct kmem_cache *iommu_iova_cache;
494
495 static inline void *alloc_pgtable_page(int node)
496 {
497         struct page *page;
498         void *vaddr = NULL;
499
500         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
501         if (page)
502                 vaddr = page_address(page);
503         return vaddr;
504 }
505
506 static inline void free_pgtable_page(void *vaddr)
507 {
508         free_page((unsigned long)vaddr);
509 }
510
511 static inline void *alloc_domain_mem(void)
512 {
513         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
514 }
515
516 static void free_domain_mem(void *vaddr)
517 {
518         kmem_cache_free(iommu_domain_cache, vaddr);
519 }
520
521 static inline void * alloc_devinfo_mem(void)
522 {
523         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
524 }
525
526 static inline void free_devinfo_mem(void *vaddr)
527 {
528         kmem_cache_free(iommu_devinfo_cache, vaddr);
529 }
530
531 struct iova *alloc_iova_mem(void)
532 {
533         return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
534 }
535
536 void free_iova_mem(struct iova *iova)
537 {
538         kmem_cache_free(iommu_iova_cache, iova);
539 }
540
541 static inline int domain_type_is_vm(struct dmar_domain *domain)
542 {
543         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
544 }
545
546 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
547 {
548         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
549                                 DOMAIN_FLAG_STATIC_IDENTITY);
550 }
551
552 static inline int domain_pfn_supported(struct dmar_domain *domain,
553                                        unsigned long pfn)
554 {
555         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
556
557         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
558 }
559
560 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
561 {
562         unsigned long sagaw;
563         int agaw = -1;
564
565         sagaw = cap_sagaw(iommu->cap);
566         for (agaw = width_to_agaw(max_gaw);
567              agaw >= 0; agaw--) {
568                 if (test_bit(agaw, &sagaw))
569                         break;
570         }
571
572         return agaw;
573 }
574
575 /*
576  * Calculate max SAGAW for each iommu.
577  */
578 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
579 {
580         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
581 }
582
583 /*
584  * calculate agaw for each iommu.
585  * "SAGAW" may be different across iommus, use a default agaw, and
586  * get a supported less agaw for iommus that don't support the default agaw.
587  */
588 int iommu_calculate_agaw(struct intel_iommu *iommu)
589 {
590         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
591 }
592
593 /* This functionin only returns single iommu in a domain */
594 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
595 {
596         int iommu_id;
597
598         /* si_domain and vm domain should not get here. */
599         BUG_ON(domain_type_is_vm_or_si(domain));
600         iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
601         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
602                 return NULL;
603
604         return g_iommus[iommu_id];
605 }
606
607 static void domain_update_iommu_coherency(struct dmar_domain *domain)
608 {
609         struct dmar_drhd_unit *drhd;
610         struct intel_iommu *iommu;
611         int i, found = 0;
612
613         domain->iommu_coherency = 1;
614
615         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
616                 found = 1;
617                 if (!ecap_coherent(g_iommus[i]->ecap)) {
618                         domain->iommu_coherency = 0;
619                         break;
620                 }
621         }
622         if (found)
623                 return;
624
625         /* No hardware attached; use lowest common denominator */
626         rcu_read_lock();
627         for_each_active_iommu(iommu, drhd) {
628                 if (!ecap_coherent(iommu->ecap)) {
629                         domain->iommu_coherency = 0;
630                         break;
631                 }
632         }
633         rcu_read_unlock();
634 }
635
636 static int domain_update_iommu_snooping(struct intel_iommu *skip)
637 {
638         struct dmar_drhd_unit *drhd;
639         struct intel_iommu *iommu;
640         int ret = 1;
641
642         rcu_read_lock();
643         for_each_active_iommu(iommu, drhd) {
644                 if (iommu != skip) {
645                         if (!ecap_sc_support(iommu->ecap)) {
646                                 ret = 0;
647                                 break;
648                         }
649                 }
650         }
651         rcu_read_unlock();
652
653         return ret;
654 }
655
656 static int domain_update_iommu_superpage(struct intel_iommu *skip)
657 {
658         struct dmar_drhd_unit *drhd;
659         struct intel_iommu *iommu;
660         int mask = 0xf;
661
662         if (!intel_iommu_superpage) {
663                 return 0;
664         }
665
666         /* set iommu_superpage to the smallest common denominator */
667         rcu_read_lock();
668         for_each_active_iommu(iommu, drhd) {
669                 if (iommu != skip) {
670                         mask &= cap_super_page_val(iommu->cap);
671                         if (!mask)
672                                 break;
673                 }
674         }
675         rcu_read_unlock();
676
677         return fls(mask);
678 }
679
680 /* Some capabilities may be different across iommus */
681 static void domain_update_iommu_cap(struct dmar_domain *domain)
682 {
683         domain_update_iommu_coherency(domain);
684         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
685         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
686 }
687
688 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
689 {
690         struct dmar_drhd_unit *drhd = NULL;
691         struct intel_iommu *iommu;
692         struct device *tmp;
693         struct pci_dev *ptmp, *pdev = NULL;
694         u16 segment = 0;
695         int i;
696
697         if (dev_is_pci(dev)) {
698                 pdev = to_pci_dev(dev);
699                 segment = pci_domain_nr(pdev->bus);
700         } else if (ACPI_COMPANION(dev))
701                 dev = &ACPI_COMPANION(dev)->dev;
702
703         rcu_read_lock();
704         for_each_active_iommu(iommu, drhd) {
705                 if (pdev && segment != drhd->segment)
706                         continue;
707
708                 for_each_active_dev_scope(drhd->devices,
709                                           drhd->devices_cnt, i, tmp) {
710                         if (tmp == dev) {
711                                 *bus = drhd->devices[i].bus;
712                                 *devfn = drhd->devices[i].devfn;
713                                 goto out;
714                         }
715
716                         if (!pdev || !dev_is_pci(tmp))
717                                 continue;
718
719                         ptmp = to_pci_dev(tmp);
720                         if (ptmp->subordinate &&
721                             ptmp->subordinate->number <= pdev->bus->number &&
722                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
723                                 goto got_pdev;
724                 }
725
726                 if (pdev && drhd->include_all) {
727                 got_pdev:
728                         *bus = pdev->bus->number;
729                         *devfn = pdev->devfn;
730                         goto out;
731                 }
732         }
733         iommu = NULL;
734  out:
735         rcu_read_unlock();
736
737         return iommu;
738 }
739
740 static void domain_flush_cache(struct dmar_domain *domain,
741                                void *addr, int size)
742 {
743         if (!domain->iommu_coherency)
744                 clflush_cache_range(addr, size);
745 }
746
747 /* Gets context entry for a given bus and devfn */
748 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
749                 u8 bus, u8 devfn)
750 {
751         struct root_entry *root;
752         struct context_entry *context;
753         unsigned long phy_addr;
754         unsigned long flags;
755
756         spin_lock_irqsave(&iommu->lock, flags);
757         root = &iommu->root_entry[bus];
758         context = get_context_addr_from_root(root);
759         if (!context) {
760                 context = (struct context_entry *)
761                                 alloc_pgtable_page(iommu->node);
762                 if (!context) {
763                         spin_unlock_irqrestore(&iommu->lock, flags);
764                         return NULL;
765                 }
766                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
767                 phy_addr = virt_to_phys((void *)context);
768                 set_root_value(root, phy_addr);
769                 set_root_present(root);
770                 __iommu_flush_cache(iommu, root, sizeof(*root));
771         }
772         spin_unlock_irqrestore(&iommu->lock, flags);
773         return &context[devfn];
774 }
775
776 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
777 {
778         struct root_entry *root;
779         struct context_entry *context;
780         int ret;
781         unsigned long flags;
782
783         spin_lock_irqsave(&iommu->lock, flags);
784         root = &iommu->root_entry[bus];
785         context = get_context_addr_from_root(root);
786         if (!context) {
787                 ret = 0;
788                 goto out;
789         }
790         ret = context_present(&context[devfn]);
791 out:
792         spin_unlock_irqrestore(&iommu->lock, flags);
793         return ret;
794 }
795
796 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
797 {
798         struct root_entry *root;
799         struct context_entry *context;
800         unsigned long flags;
801
802         spin_lock_irqsave(&iommu->lock, flags);
803         root = &iommu->root_entry[bus];
804         context = get_context_addr_from_root(root);
805         if (context) {
806                 context_clear_entry(&context[devfn]);
807                 __iommu_flush_cache(iommu, &context[devfn], \
808                         sizeof(*context));
809         }
810         spin_unlock_irqrestore(&iommu->lock, flags);
811 }
812
813 static void free_context_table(struct intel_iommu *iommu)
814 {
815         struct root_entry *root;
816         int i;
817         unsigned long flags;
818         struct context_entry *context;
819
820         spin_lock_irqsave(&iommu->lock, flags);
821         if (!iommu->root_entry) {
822                 goto out;
823         }
824         for (i = 0; i < ROOT_ENTRY_NR; i++) {
825                 root = &iommu->root_entry[i];
826                 context = get_context_addr_from_root(root);
827                 if (context)
828                         free_pgtable_page(context);
829         }
830         free_pgtable_page(iommu->root_entry);
831         iommu->root_entry = NULL;
832 out:
833         spin_unlock_irqrestore(&iommu->lock, flags);
834 }
835
836 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
837                                       unsigned long pfn, int *target_level)
838 {
839         struct dma_pte *parent, *pte = NULL;
840         int level = agaw_to_level(domain->agaw);
841         int offset;
842
843         BUG_ON(!domain->pgd);
844
845         if (!domain_pfn_supported(domain, pfn))
846                 /* Address beyond IOMMU's addressing capabilities. */
847                 return NULL;
848
849         parent = domain->pgd;
850
851         while (1) {
852                 void *tmp_page;
853
854                 offset = pfn_level_offset(pfn, level);
855                 pte = &parent[offset];
856                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
857                         break;
858                 if (level == *target_level)
859                         break;
860
861                 if (!dma_pte_present(pte)) {
862                         uint64_t pteval;
863
864                         tmp_page = alloc_pgtable_page(domain->nid);
865
866                         if (!tmp_page)
867                                 return NULL;
868
869                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
870                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
871                         if (cmpxchg64(&pte->val, 0ULL, pteval))
872                                 /* Someone else set it while we were thinking; use theirs. */
873                                 free_pgtable_page(tmp_page);
874                         else
875                                 domain_flush_cache(domain, pte, sizeof(*pte));
876                 }
877                 if (level == 1)
878                         break;
879
880                 parent = phys_to_virt(dma_pte_addr(pte));
881                 level--;
882         }
883
884         if (!*target_level)
885                 *target_level = level;
886
887         return pte;
888 }
889
890
891 /* return address's pte at specific level */
892 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
893                                          unsigned long pfn,
894                                          int level, int *large_page)
895 {
896         struct dma_pte *parent, *pte = NULL;
897         int total = agaw_to_level(domain->agaw);
898         int offset;
899
900         parent = domain->pgd;
901         while (level <= total) {
902                 offset = pfn_level_offset(pfn, total);
903                 pte = &parent[offset];
904                 if (level == total)
905                         return pte;
906
907                 if (!dma_pte_present(pte)) {
908                         *large_page = total;
909                         break;
910                 }
911
912                 if (dma_pte_superpage(pte)) {
913                         *large_page = total;
914                         return pte;
915                 }
916
917                 parent = phys_to_virt(dma_pte_addr(pte));
918                 total--;
919         }
920         return NULL;
921 }
922
923 /* clear last level pte, a tlb flush should be followed */
924 static void dma_pte_clear_range(struct dmar_domain *domain,
925                                 unsigned long start_pfn,
926                                 unsigned long last_pfn)
927 {
928         unsigned int large_page = 1;
929         struct dma_pte *first_pte, *pte;
930
931         BUG_ON(!domain_pfn_supported(domain, start_pfn));
932         BUG_ON(!domain_pfn_supported(domain, last_pfn));
933         BUG_ON(start_pfn > last_pfn);
934
935         /* we don't need lock here; nobody else touches the iova range */
936         do {
937                 large_page = 1;
938                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
939                 if (!pte) {
940                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
941                         continue;
942                 }
943                 do {
944                         dma_clear_pte(pte);
945                         start_pfn += lvl_to_nr_pages(large_page);
946                         pte++;
947                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
948
949                 domain_flush_cache(domain, first_pte,
950                                    (void *)pte - (void *)first_pte);
951
952         } while (start_pfn && start_pfn <= last_pfn);
953 }
954
955 static void dma_pte_free_level(struct dmar_domain *domain, int level,
956                                struct dma_pte *pte, unsigned long pfn,
957                                unsigned long start_pfn, unsigned long last_pfn)
958 {
959         pfn = max(start_pfn, pfn);
960         pte = &pte[pfn_level_offset(pfn, level)];
961
962         do {
963                 unsigned long level_pfn;
964                 struct dma_pte *level_pte;
965
966                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
967                         goto next;
968
969                 level_pfn = pfn & level_mask(level - 1);
970                 level_pte = phys_to_virt(dma_pte_addr(pte));
971
972                 if (level > 2)
973                         dma_pte_free_level(domain, level - 1, level_pte,
974                                            level_pfn, start_pfn, last_pfn);
975
976                 /* If range covers entire pagetable, free it */
977                 if (!(start_pfn > level_pfn ||
978                       last_pfn < level_pfn + level_size(level) - 1)) {
979                         dma_clear_pte(pte);
980                         domain_flush_cache(domain, pte, sizeof(*pte));
981                         free_pgtable_page(level_pte);
982                 }
983 next:
984                 pfn += level_size(level);
985         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
986 }
987
988 /* free page table pages. last level pte should already be cleared */
989 static void dma_pte_free_pagetable(struct dmar_domain *domain,
990                                    unsigned long start_pfn,
991                                    unsigned long last_pfn)
992 {
993         BUG_ON(!domain_pfn_supported(domain, start_pfn));
994         BUG_ON(!domain_pfn_supported(domain, last_pfn));
995         BUG_ON(start_pfn > last_pfn);
996
997         dma_pte_clear_range(domain, start_pfn, last_pfn);
998
999         /* We don't need lock here; nobody else touches the iova range */
1000         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1001                            domain->pgd, 0, start_pfn, last_pfn);
1002
1003         /* free pgd */
1004         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1005                 free_pgtable_page(domain->pgd);
1006                 domain->pgd = NULL;
1007         }
1008 }
1009
1010 /* When a page at a given level is being unlinked from its parent, we don't
1011    need to *modify* it at all. All we need to do is make a list of all the
1012    pages which can be freed just as soon as we've flushed the IOTLB and we
1013    know the hardware page-walk will no longer touch them.
1014    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1015    be freed. */
1016 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1017                                             int level, struct dma_pte *pte,
1018                                             struct page *freelist)
1019 {
1020         struct page *pg;
1021
1022         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1023         pg->freelist = freelist;
1024         freelist = pg;
1025
1026         if (level == 1)
1027                 return freelist;
1028
1029         pte = page_address(pg);
1030         do {
1031                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1032                         freelist = dma_pte_list_pagetables(domain, level - 1,
1033                                                            pte, freelist);
1034                 pte++;
1035         } while (!first_pte_in_page(pte));
1036
1037         return freelist;
1038 }
1039
1040 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1041                                         struct dma_pte *pte, unsigned long pfn,
1042                                         unsigned long start_pfn,
1043                                         unsigned long last_pfn,
1044                                         struct page *freelist)
1045 {
1046         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1047
1048         pfn = max(start_pfn, pfn);
1049         pte = &pte[pfn_level_offset(pfn, level)];
1050
1051         do {
1052                 unsigned long level_pfn;
1053
1054                 if (!dma_pte_present(pte))
1055                         goto next;
1056
1057                 level_pfn = pfn & level_mask(level);
1058
1059                 /* If range covers entire pagetable, free it */
1060                 if (start_pfn <= level_pfn &&
1061                     last_pfn >= level_pfn + level_size(level) - 1) {
1062                         /* These suborbinate page tables are going away entirely. Don't
1063                            bother to clear them; we're just going to *free* them. */
1064                         if (level > 1 && !dma_pte_superpage(pte))
1065                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1066
1067                         dma_clear_pte(pte);
1068                         if (!first_pte)
1069                                 first_pte = pte;
1070                         last_pte = pte;
1071                 } else if (level > 1) {
1072                         /* Recurse down into a level that isn't *entirely* obsolete */
1073                         freelist = dma_pte_clear_level(domain, level - 1,
1074                                                        phys_to_virt(dma_pte_addr(pte)),
1075                                                        level_pfn, start_pfn, last_pfn,
1076                                                        freelist);
1077                 }
1078 next:
1079                 pfn += level_size(level);
1080         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1081
1082         if (first_pte)
1083                 domain_flush_cache(domain, first_pte,
1084                                    (void *)++last_pte - (void *)first_pte);
1085
1086         return freelist;
1087 }
1088
1089 /* We can't just free the pages because the IOMMU may still be walking
1090    the page tables, and may have cached the intermediate levels. The
1091    pages can only be freed after the IOTLB flush has been done. */
1092 struct page *domain_unmap(struct dmar_domain *domain,
1093                           unsigned long start_pfn,
1094                           unsigned long last_pfn)
1095 {
1096         struct page *freelist = NULL;
1097
1098         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1099         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1100         BUG_ON(start_pfn > last_pfn);
1101
1102         /* we don't need lock here; nobody else touches the iova range */
1103         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1104                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1105
1106         /* free pgd */
1107         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1108                 struct page *pgd_page = virt_to_page(domain->pgd);
1109                 pgd_page->freelist = freelist;
1110                 freelist = pgd_page;
1111
1112                 domain->pgd = NULL;
1113         }
1114
1115         return freelist;
1116 }
1117
1118 void dma_free_pagelist(struct page *freelist)
1119 {
1120         struct page *pg;
1121
1122         while ((pg = freelist)) {
1123                 freelist = pg->freelist;
1124                 free_pgtable_page(page_address(pg));
1125         }
1126 }
1127
1128 /* iommu handling */
1129 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1130 {
1131         struct root_entry *root;
1132         unsigned long flags;
1133
1134         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1135         if (!root)
1136                 return -ENOMEM;
1137
1138         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1139
1140         spin_lock_irqsave(&iommu->lock, flags);
1141         iommu->root_entry = root;
1142         spin_unlock_irqrestore(&iommu->lock, flags);
1143
1144         return 0;
1145 }
1146
1147 static void iommu_set_root_entry(struct intel_iommu *iommu)
1148 {
1149         void *addr;
1150         u32 sts;
1151         unsigned long flag;
1152
1153         addr = iommu->root_entry;
1154
1155         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1156         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
1157
1158         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1159
1160         /* Make sure hardware complete it */
1161         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1162                       readl, (sts & DMA_GSTS_RTPS), sts);
1163
1164         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1165 }
1166
1167 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1168 {
1169         u32 val;
1170         unsigned long flag;
1171
1172         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1173                 return;
1174
1175         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1176         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1177
1178         /* Make sure hardware complete it */
1179         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1180                       readl, (!(val & DMA_GSTS_WBFS)), val);
1181
1182         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1183 }
1184
1185 /* return value determine if we need a write buffer flush */
1186 static void __iommu_flush_context(struct intel_iommu *iommu,
1187                                   u16 did, u16 source_id, u8 function_mask,
1188                                   u64 type)
1189 {
1190         u64 val = 0;
1191         unsigned long flag;
1192
1193         switch (type) {
1194         case DMA_CCMD_GLOBAL_INVL:
1195                 val = DMA_CCMD_GLOBAL_INVL;
1196                 break;
1197         case DMA_CCMD_DOMAIN_INVL:
1198                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1199                 break;
1200         case DMA_CCMD_DEVICE_INVL:
1201                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1202                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1203                 break;
1204         default:
1205                 BUG();
1206         }
1207         val |= DMA_CCMD_ICC;
1208
1209         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1210         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1211
1212         /* Make sure hardware complete it */
1213         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1214                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1215
1216         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1217 }
1218
1219 /* return value determine if we need a write buffer flush */
1220 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1221                                 u64 addr, unsigned int size_order, u64 type)
1222 {
1223         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1224         u64 val = 0, val_iva = 0;
1225         unsigned long flag;
1226
1227         switch (type) {
1228         case DMA_TLB_GLOBAL_FLUSH:
1229                 /* global flush doesn't need set IVA_REG */
1230                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1231                 break;
1232         case DMA_TLB_DSI_FLUSH:
1233                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1234                 break;
1235         case DMA_TLB_PSI_FLUSH:
1236                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1237                 /* IH bit is passed in as part of address */
1238                 val_iva = size_order | addr;
1239                 break;
1240         default:
1241                 BUG();
1242         }
1243         /* Note: set drain read/write */
1244 #if 0
1245         /*
1246          * This is probably to be super secure.. Looks like we can
1247          * ignore it without any impact.
1248          */
1249         if (cap_read_drain(iommu->cap))
1250                 val |= DMA_TLB_READ_DRAIN;
1251 #endif
1252         if (cap_write_drain(iommu->cap))
1253                 val |= DMA_TLB_WRITE_DRAIN;
1254
1255         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1256         /* Note: Only uses first TLB reg currently */
1257         if (val_iva)
1258                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1259         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1260
1261         /* Make sure hardware complete it */
1262         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1263                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1264
1265         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1266
1267         /* check IOTLB invalidation granularity */
1268         if (DMA_TLB_IAIG(val) == 0)
1269                 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1270         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1271                 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1272                         (unsigned long long)DMA_TLB_IIRG(type),
1273                         (unsigned long long)DMA_TLB_IAIG(val));
1274 }
1275
1276 static struct device_domain_info *
1277 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1278                          u8 bus, u8 devfn)
1279 {
1280         int found = 0;
1281         unsigned long flags;
1282         struct device_domain_info *info;
1283         struct pci_dev *pdev;
1284
1285         if (!ecap_dev_iotlb_support(iommu->ecap))
1286                 return NULL;
1287
1288         if (!iommu->qi)
1289                 return NULL;
1290
1291         spin_lock_irqsave(&device_domain_lock, flags);
1292         list_for_each_entry(info, &domain->devices, link)
1293                 if (info->iommu == iommu && info->bus == bus &&
1294                     info->devfn == devfn) {
1295                         found = 1;
1296                         break;
1297                 }
1298         spin_unlock_irqrestore(&device_domain_lock, flags);
1299
1300         if (!found || !info->dev || !dev_is_pci(info->dev))
1301                 return NULL;
1302
1303         pdev = to_pci_dev(info->dev);
1304
1305         if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1306                 return NULL;
1307
1308         if (!dmar_find_matched_atsr_unit(pdev))
1309                 return NULL;
1310
1311         return info;
1312 }
1313
1314 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1315 {
1316         if (!info || !dev_is_pci(info->dev))
1317                 return;
1318
1319         pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1320 }
1321
1322 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1323 {
1324         if (!info->dev || !dev_is_pci(info->dev) ||
1325             !pci_ats_enabled(to_pci_dev(info->dev)))
1326                 return;
1327
1328         pci_disable_ats(to_pci_dev(info->dev));
1329 }
1330
1331 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1332                                   u64 addr, unsigned mask)
1333 {
1334         u16 sid, qdep;
1335         unsigned long flags;
1336         struct device_domain_info *info;
1337
1338         spin_lock_irqsave(&device_domain_lock, flags);
1339         list_for_each_entry(info, &domain->devices, link) {
1340                 struct pci_dev *pdev;
1341                 if (!info->dev || !dev_is_pci(info->dev))
1342                         continue;
1343
1344                 pdev = to_pci_dev(info->dev);
1345                 if (!pci_ats_enabled(pdev))
1346                         continue;
1347
1348                 sid = info->bus << 8 | info->devfn;
1349                 qdep = pci_ats_queue_depth(pdev);
1350                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1351         }
1352         spin_unlock_irqrestore(&device_domain_lock, flags);
1353 }
1354
1355 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1356                                   unsigned long pfn, unsigned int pages, int ih, int map)
1357 {
1358         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1359         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1360
1361         BUG_ON(pages == 0);
1362
1363         if (ih)
1364                 ih = 1 << 6;
1365         /*
1366          * Fallback to domain selective flush if no PSI support or the size is
1367          * too big.
1368          * PSI requires page size to be 2 ^ x, and the base address is naturally
1369          * aligned to the size
1370          */
1371         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1372                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1373                                                 DMA_TLB_DSI_FLUSH);
1374         else
1375                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1376                                                 DMA_TLB_PSI_FLUSH);
1377
1378         /*
1379          * In caching mode, changes of pages from non-present to present require
1380          * flush. However, device IOTLB doesn't need to be flushed in this case.
1381          */
1382         if (!cap_caching_mode(iommu->cap) || !map)
1383                 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1384 }
1385
1386 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1387 {
1388         u32 pmen;
1389         unsigned long flags;
1390
1391         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1392         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1393         pmen &= ~DMA_PMEN_EPM;
1394         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1395
1396         /* wait for the protected region status bit to clear */
1397         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1398                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1399
1400         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1401 }
1402
1403 static void iommu_enable_translation(struct intel_iommu *iommu)
1404 {
1405         u32 sts;
1406         unsigned long flags;
1407
1408         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1409         iommu->gcmd |= DMA_GCMD_TE;
1410         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1411
1412         /* Make sure hardware complete it */
1413         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1414                       readl, (sts & DMA_GSTS_TES), sts);
1415
1416         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1417 }
1418
1419 static void iommu_disable_translation(struct intel_iommu *iommu)
1420 {
1421         u32 sts;
1422         unsigned long flag;
1423
1424         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1425         iommu->gcmd &= ~DMA_GCMD_TE;
1426         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1427
1428         /* Make sure hardware complete it */
1429         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1430                       readl, (!(sts & DMA_GSTS_TES)), sts);
1431
1432         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1433 }
1434
1435
1436 static int iommu_init_domains(struct intel_iommu *iommu)
1437 {
1438         unsigned long ndomains;
1439         unsigned long nlongs;
1440
1441         ndomains = cap_ndoms(iommu->cap);
1442         pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1443                  iommu->seq_id, ndomains);
1444         nlongs = BITS_TO_LONGS(ndomains);
1445
1446         spin_lock_init(&iommu->lock);
1447
1448         /* TBD: there might be 64K domains,
1449          * consider other allocation for future chip
1450          */
1451         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1452         if (!iommu->domain_ids) {
1453                 pr_err("IOMMU%d: allocating domain id array failed\n",
1454                        iommu->seq_id);
1455                 return -ENOMEM;
1456         }
1457         iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1458                         GFP_KERNEL);
1459         if (!iommu->domains) {
1460                 pr_err("IOMMU%d: allocating domain array failed\n",
1461                        iommu->seq_id);
1462                 kfree(iommu->domain_ids);
1463                 iommu->domain_ids = NULL;
1464                 return -ENOMEM;
1465         }
1466
1467         /*
1468          * if Caching mode is set, then invalid translations are tagged
1469          * with domainid 0. Hence we need to pre-allocate it.
1470          */
1471         if (cap_caching_mode(iommu->cap))
1472                 set_bit(0, iommu->domain_ids);
1473         return 0;
1474 }
1475
1476 static void free_dmar_iommu(struct intel_iommu *iommu)
1477 {
1478         struct dmar_domain *domain;
1479         int i;
1480
1481         if ((iommu->domains) && (iommu->domain_ids)) {
1482                 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1483                         /*
1484                          * Domain id 0 is reserved for invalid translation
1485                          * if hardware supports caching mode.
1486                          */
1487                         if (cap_caching_mode(iommu->cap) && i == 0)
1488                                 continue;
1489
1490                         domain = iommu->domains[i];
1491                         clear_bit(i, iommu->domain_ids);
1492                         if (domain_detach_iommu(domain, iommu) == 0 &&
1493                             !domain_type_is_vm(domain))
1494                                 domain_exit(domain);
1495                 }
1496         }
1497
1498         if (iommu->gcmd & DMA_GCMD_TE)
1499                 iommu_disable_translation(iommu);
1500
1501         kfree(iommu->domains);
1502         kfree(iommu->domain_ids);
1503         iommu->domains = NULL;
1504         iommu->domain_ids = NULL;
1505
1506         g_iommus[iommu->seq_id] = NULL;
1507
1508         /* free context mapping */
1509         free_context_table(iommu);
1510 }
1511
1512 static struct dmar_domain *alloc_domain(int flags)
1513 {
1514         /* domain id for virtual machine, it won't be set in context */
1515         static atomic_t vm_domid = ATOMIC_INIT(0);
1516         struct dmar_domain *domain;
1517
1518         domain = alloc_domain_mem();
1519         if (!domain)
1520                 return NULL;
1521
1522         memset(domain, 0, sizeof(*domain));
1523         domain->nid = -1;
1524         domain->flags = flags;
1525         spin_lock_init(&domain->iommu_lock);
1526         INIT_LIST_HEAD(&domain->devices);
1527         if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1528                 domain->id = atomic_inc_return(&vm_domid);
1529
1530         return domain;
1531 }
1532
1533 static int __iommu_attach_domain(struct dmar_domain *domain,
1534                                  struct intel_iommu *iommu)
1535 {
1536         int num;
1537         unsigned long ndomains;
1538
1539         ndomains = cap_ndoms(iommu->cap);
1540         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1541         if (num < ndomains) {
1542                 set_bit(num, iommu->domain_ids);
1543                 iommu->domains[num] = domain;
1544         } else {
1545                 num = -ENOSPC;
1546         }
1547
1548         return num;
1549 }
1550
1551 static int iommu_attach_domain(struct dmar_domain *domain,
1552                                struct intel_iommu *iommu)
1553 {
1554         int num;
1555         unsigned long flags;
1556
1557         spin_lock_irqsave(&iommu->lock, flags);
1558         num = __iommu_attach_domain(domain, iommu);
1559         spin_unlock_irqrestore(&iommu->lock, flags);
1560         if (num < 0)
1561                 pr_err("IOMMU: no free domain ids\n");
1562
1563         return num;
1564 }
1565
1566 static int iommu_attach_vm_domain(struct dmar_domain *domain,
1567                                   struct intel_iommu *iommu)
1568 {
1569         int num;
1570         unsigned long ndomains;
1571
1572         ndomains = cap_ndoms(iommu->cap);
1573         for_each_set_bit(num, iommu->domain_ids, ndomains)
1574                 if (iommu->domains[num] == domain)
1575                         return num;
1576
1577         return __iommu_attach_domain(domain, iommu);
1578 }
1579
1580 static void iommu_detach_domain(struct dmar_domain *domain,
1581                                 struct intel_iommu *iommu)
1582 {
1583         unsigned long flags;
1584         int num, ndomains;
1585
1586         spin_lock_irqsave(&iommu->lock, flags);
1587         if (domain_type_is_vm_or_si(domain)) {
1588                 ndomains = cap_ndoms(iommu->cap);
1589                 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1590                         if (iommu->domains[num] == domain) {
1591                                 clear_bit(num, iommu->domain_ids);
1592                                 iommu->domains[num] = NULL;
1593                                 break;
1594                         }
1595                 }
1596         } else {
1597                 clear_bit(domain->id, iommu->domain_ids);
1598                 iommu->domains[domain->id] = NULL;
1599         }
1600         spin_unlock_irqrestore(&iommu->lock, flags);
1601 }
1602
1603 static void domain_attach_iommu(struct dmar_domain *domain,
1604                                struct intel_iommu *iommu)
1605 {
1606         unsigned long flags;
1607
1608         spin_lock_irqsave(&domain->iommu_lock, flags);
1609         if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1610                 domain->iommu_count++;
1611                 if (domain->iommu_count == 1)
1612                         domain->nid = iommu->node;
1613                 domain_update_iommu_cap(domain);
1614         }
1615         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1616 }
1617
1618 static int domain_detach_iommu(struct dmar_domain *domain,
1619                                struct intel_iommu *iommu)
1620 {
1621         unsigned long flags;
1622         int count = INT_MAX;
1623
1624         spin_lock_irqsave(&domain->iommu_lock, flags);
1625         if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1626                 count = --domain->iommu_count;
1627                 domain_update_iommu_cap(domain);
1628         }
1629         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1630
1631         return count;
1632 }
1633
1634 static struct iova_domain reserved_iova_list;
1635 static struct lock_class_key reserved_rbtree_key;
1636
1637 static int dmar_init_reserved_ranges(void)
1638 {
1639         struct pci_dev *pdev = NULL;
1640         struct iova *iova;
1641         int i;
1642
1643         init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1644
1645         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1646                 &reserved_rbtree_key);
1647
1648         /* IOAPIC ranges shouldn't be accessed by DMA */
1649         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1650                 IOVA_PFN(IOAPIC_RANGE_END));
1651         if (!iova) {
1652                 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1653                 return -ENODEV;
1654         }
1655
1656         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1657         for_each_pci_dev(pdev) {
1658                 struct resource *r;
1659
1660                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1661                         r = &pdev->resource[i];
1662                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1663                                 continue;
1664                         iova = reserve_iova(&reserved_iova_list,
1665                                             IOVA_PFN(r->start),
1666                                             IOVA_PFN(r->end));
1667                         if (!iova) {
1668                                 printk(KERN_ERR "Reserve iova failed\n");
1669                                 return -ENODEV;
1670                         }
1671                 }
1672         }
1673         return 0;
1674 }
1675
1676 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1677 {
1678         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1679 }
1680
1681 static inline int guestwidth_to_adjustwidth(int gaw)
1682 {
1683         int agaw;
1684         int r = (gaw - 12) % 9;
1685
1686         if (r == 0)
1687                 agaw = gaw;
1688         else
1689                 agaw = gaw + 9 - r;
1690         if (agaw > 64)
1691                 agaw = 64;
1692         return agaw;
1693 }
1694
1695 static int domain_init(struct dmar_domain *domain, int guest_width)
1696 {
1697         struct intel_iommu *iommu;
1698         int adjust_width, agaw;
1699         unsigned long sagaw;
1700
1701         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1702         domain_reserve_special_ranges(domain);
1703
1704         /* calculate AGAW */
1705         iommu = domain_get_iommu(domain);
1706         if (guest_width > cap_mgaw(iommu->cap))
1707                 guest_width = cap_mgaw(iommu->cap);
1708         domain->gaw = guest_width;
1709         adjust_width = guestwidth_to_adjustwidth(guest_width);
1710         agaw = width_to_agaw(adjust_width);
1711         sagaw = cap_sagaw(iommu->cap);
1712         if (!test_bit(agaw, &sagaw)) {
1713                 /* hardware doesn't support it, choose a bigger one */
1714                 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1715                 agaw = find_next_bit(&sagaw, 5, agaw);
1716                 if (agaw >= 5)
1717                         return -ENODEV;
1718         }
1719         domain->agaw = agaw;
1720
1721         if (ecap_coherent(iommu->ecap))
1722                 domain->iommu_coherency = 1;
1723         else
1724                 domain->iommu_coherency = 0;
1725
1726         if (ecap_sc_support(iommu->ecap))
1727                 domain->iommu_snooping = 1;
1728         else
1729                 domain->iommu_snooping = 0;
1730
1731         if (intel_iommu_superpage)
1732                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1733         else
1734                 domain->iommu_superpage = 0;
1735
1736         domain->nid = iommu->node;
1737
1738         /* always allocate the top pgd */
1739         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1740         if (!domain->pgd)
1741                 return -ENOMEM;
1742         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1743         return 0;
1744 }
1745
1746 static void domain_exit(struct dmar_domain *domain)
1747 {
1748         struct dmar_drhd_unit *drhd;
1749         struct intel_iommu *iommu;
1750         struct page *freelist = NULL;
1751
1752         /* Domain 0 is reserved, so dont process it */
1753         if (!domain)
1754                 return;
1755
1756         /* Flush any lazy unmaps that may reference this domain */
1757         if (!intel_iommu_strict)
1758                 flush_unmaps_timeout(0);
1759
1760         /* remove associated devices */
1761         domain_remove_dev_info(domain);
1762
1763         /* destroy iovas */
1764         put_iova_domain(&domain->iovad);
1765
1766         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1767
1768         /* clear attached or cached domains */
1769         rcu_read_lock();
1770         for_each_active_iommu(iommu, drhd)
1771                 iommu_detach_domain(domain, iommu);
1772         rcu_read_unlock();
1773
1774         dma_free_pagelist(freelist);
1775
1776         free_domain_mem(domain);
1777 }
1778
1779 static int domain_context_mapping_one(struct dmar_domain *domain,
1780                                       struct intel_iommu *iommu,
1781                                       u8 bus, u8 devfn, int translation)
1782 {
1783         struct context_entry *context;
1784         unsigned long flags;
1785         struct dma_pte *pgd;
1786         int id;
1787         int agaw;
1788         struct device_domain_info *info = NULL;
1789
1790         pr_debug("Set context mapping for %02x:%02x.%d\n",
1791                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1792
1793         BUG_ON(!domain->pgd);
1794         BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1795                translation != CONTEXT_TT_MULTI_LEVEL);
1796
1797         context = device_to_context_entry(iommu, bus, devfn);
1798         if (!context)
1799                 return -ENOMEM;
1800         spin_lock_irqsave(&iommu->lock, flags);
1801         if (context_present(context)) {
1802                 spin_unlock_irqrestore(&iommu->lock, flags);
1803                 return 0;
1804         }
1805
1806         id = domain->id;
1807         pgd = domain->pgd;
1808
1809         if (domain_type_is_vm_or_si(domain)) {
1810                 if (domain_type_is_vm(domain)) {
1811                         id = iommu_attach_vm_domain(domain, iommu);
1812                         if (id < 0) {
1813                                 spin_unlock_irqrestore(&iommu->lock, flags);
1814                                 pr_err("IOMMU: no free domain ids\n");
1815                                 return -EFAULT;
1816                         }
1817                 }
1818
1819                 /* Skip top levels of page tables for
1820                  * iommu which has less agaw than default.
1821                  * Unnecessary for PT mode.
1822                  */
1823                 if (translation != CONTEXT_TT_PASS_THROUGH) {
1824                         for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1825                                 pgd = phys_to_virt(dma_pte_addr(pgd));
1826                                 if (!dma_pte_present(pgd)) {
1827                                         spin_unlock_irqrestore(&iommu->lock, flags);
1828                                         return -ENOMEM;
1829                                 }
1830                         }
1831                 }
1832         }
1833
1834         context_set_domain_id(context, id);
1835
1836         if (translation != CONTEXT_TT_PASS_THROUGH) {
1837                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1838                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1839                                      CONTEXT_TT_MULTI_LEVEL;
1840         }
1841         /*
1842          * In pass through mode, AW must be programmed to indicate the largest
1843          * AGAW value supported by hardware. And ASR is ignored by hardware.
1844          */
1845         if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1846                 context_set_address_width(context, iommu->msagaw);
1847         else {
1848                 context_set_address_root(context, virt_to_phys(pgd));
1849                 context_set_address_width(context, iommu->agaw);
1850         }
1851
1852         context_set_translation_type(context, translation);
1853         context_set_fault_enable(context);
1854         context_set_present(context);
1855         domain_flush_cache(domain, context, sizeof(*context));
1856
1857         /*
1858          * It's a non-present to present mapping. If hardware doesn't cache
1859          * non-present entry we only need to flush the write-buffer. If the
1860          * _does_ cache non-present entries, then it does so in the special
1861          * domain #0, which we have to flush:
1862          */
1863         if (cap_caching_mode(iommu->cap)) {
1864                 iommu->flush.flush_context(iommu, 0,
1865                                            (((u16)bus) << 8) | devfn,
1866                                            DMA_CCMD_MASK_NOBIT,
1867                                            DMA_CCMD_DEVICE_INVL);
1868                 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
1869         } else {
1870                 iommu_flush_write_buffer(iommu);
1871         }
1872         iommu_enable_dev_iotlb(info);
1873         spin_unlock_irqrestore(&iommu->lock, flags);
1874
1875         domain_attach_iommu(domain, iommu);
1876
1877         return 0;
1878 }
1879
1880 struct domain_context_mapping_data {
1881         struct dmar_domain *domain;
1882         struct intel_iommu *iommu;
1883         int translation;
1884 };
1885
1886 static int domain_context_mapping_cb(struct pci_dev *pdev,
1887                                      u16 alias, void *opaque)
1888 {
1889         struct domain_context_mapping_data *data = opaque;
1890
1891         return domain_context_mapping_one(data->domain, data->iommu,
1892                                           PCI_BUS_NUM(alias), alias & 0xff,
1893                                           data->translation);
1894 }
1895
1896 static int
1897 domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1898                        int translation)
1899 {
1900         struct intel_iommu *iommu;
1901         u8 bus, devfn;
1902         struct domain_context_mapping_data data;
1903
1904         iommu = device_to_iommu(dev, &bus, &devfn);
1905         if (!iommu)
1906                 return -ENODEV;
1907
1908         if (!dev_is_pci(dev))
1909                 return domain_context_mapping_one(domain, iommu, bus, devfn,
1910                                                   translation);
1911
1912         data.domain = domain;
1913         data.iommu = iommu;
1914         data.translation = translation;
1915
1916         return pci_for_each_dma_alias(to_pci_dev(dev),
1917                                       &domain_context_mapping_cb, &data);
1918 }
1919
1920 static int domain_context_mapped_cb(struct pci_dev *pdev,
1921                                     u16 alias, void *opaque)
1922 {
1923         struct intel_iommu *iommu = opaque;
1924
1925         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
1926 }
1927
1928 static int domain_context_mapped(struct device *dev)
1929 {
1930         struct intel_iommu *iommu;
1931         u8 bus, devfn;
1932
1933         iommu = device_to_iommu(dev, &bus, &devfn);
1934         if (!iommu)
1935                 return -ENODEV;
1936
1937         if (!dev_is_pci(dev))
1938                 return device_context_mapped(iommu, bus, devfn);
1939
1940         return !pci_for_each_dma_alias(to_pci_dev(dev),
1941                                        domain_context_mapped_cb, iommu);
1942 }
1943
1944 /* Returns a number of VTD pages, but aligned to MM page size */
1945 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1946                                             size_t size)
1947 {
1948         host_addr &= ~PAGE_MASK;
1949         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1950 }
1951
1952 /* Return largest possible superpage level for a given mapping */
1953 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1954                                           unsigned long iov_pfn,
1955                                           unsigned long phy_pfn,
1956                                           unsigned long pages)
1957 {
1958         int support, level = 1;
1959         unsigned long pfnmerge;
1960
1961         support = domain->iommu_superpage;
1962
1963         /* To use a large page, the virtual *and* physical addresses
1964            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1965            of them will mean we have to use smaller pages. So just
1966            merge them and check both at once. */
1967         pfnmerge = iov_pfn | phy_pfn;
1968
1969         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1970                 pages >>= VTD_STRIDE_SHIFT;
1971                 if (!pages)
1972                         break;
1973                 pfnmerge >>= VTD_STRIDE_SHIFT;
1974                 level++;
1975                 support--;
1976         }
1977         return level;
1978 }
1979
1980 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1981                             struct scatterlist *sg, unsigned long phys_pfn,
1982                             unsigned long nr_pages, int prot)
1983 {
1984         struct dma_pte *first_pte = NULL, *pte = NULL;
1985         phys_addr_t uninitialized_var(pteval);
1986         unsigned long sg_res;
1987         unsigned int largepage_lvl = 0;
1988         unsigned long lvl_pages = 0;
1989
1990         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
1991
1992         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1993                 return -EINVAL;
1994
1995         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1996
1997         if (sg)
1998                 sg_res = 0;
1999         else {
2000                 sg_res = nr_pages + 1;
2001                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2002         }
2003
2004         while (nr_pages > 0) {
2005                 uint64_t tmp;
2006
2007                 if (!sg_res) {
2008                         sg_res = aligned_nrpages(sg->offset, sg->length);
2009                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2010                         sg->dma_length = sg->length;
2011                         pteval = page_to_phys(sg_page(sg)) | prot;
2012                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2013                 }
2014
2015                 if (!pte) {
2016                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2017
2018                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2019                         if (!pte)
2020                                 return -ENOMEM;
2021                         /* It is large page*/
2022                         if (largepage_lvl > 1) {
2023                                 pteval |= DMA_PTE_LARGE_PAGE;
2024                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2025                                 /*
2026                                  * Ensure that old small page tables are
2027                                  * removed to make room for superpage,
2028                                  * if they exist.
2029                                  */
2030                                 dma_pte_free_pagetable(domain, iov_pfn,
2031                                                        iov_pfn + lvl_pages - 1);
2032                         } else {
2033                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2034                         }
2035
2036                 }
2037                 /* We don't need lock here, nobody else
2038                  * touches the iova range
2039                  */
2040                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2041                 if (tmp) {
2042                         static int dumps = 5;
2043                         printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2044                                iov_pfn, tmp, (unsigned long long)pteval);
2045                         if (dumps) {
2046                                 dumps--;
2047                                 debug_dma_dump_mappings(NULL);
2048                         }
2049                         WARN_ON(1);
2050                 }
2051
2052                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2053
2054                 BUG_ON(nr_pages < lvl_pages);
2055                 BUG_ON(sg_res < lvl_pages);
2056
2057                 nr_pages -= lvl_pages;
2058                 iov_pfn += lvl_pages;
2059                 phys_pfn += lvl_pages;
2060                 pteval += lvl_pages * VTD_PAGE_SIZE;
2061                 sg_res -= lvl_pages;
2062
2063                 /* If the next PTE would be the first in a new page, then we
2064                    need to flush the cache on the entries we've just written.
2065                    And then we'll need to recalculate 'pte', so clear it and
2066                    let it get set again in the if (!pte) block above.
2067
2068                    If we're done (!nr_pages) we need to flush the cache too.
2069
2070                    Also if we've been setting superpages, we may need to
2071                    recalculate 'pte' and switch back to smaller pages for the
2072                    end of the mapping, if the trailing size is not enough to
2073                    use another superpage (i.e. sg_res < lvl_pages). */
2074                 pte++;
2075                 if (!nr_pages || first_pte_in_page(pte) ||
2076                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2077                         domain_flush_cache(domain, first_pte,
2078                                            (void *)pte - (void *)first_pte);
2079                         pte = NULL;
2080                 }
2081
2082                 if (!sg_res && nr_pages)
2083                         sg = sg_next(sg);
2084         }
2085         return 0;
2086 }
2087
2088 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2089                                     struct scatterlist *sg, unsigned long nr_pages,
2090                                     int prot)
2091 {
2092         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2093 }
2094
2095 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2096                                      unsigned long phys_pfn, unsigned long nr_pages,
2097                                      int prot)
2098 {
2099         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2100 }
2101
2102 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2103 {
2104         if (!iommu)
2105                 return;
2106
2107         clear_context_table(iommu, bus, devfn);
2108         iommu->flush.flush_context(iommu, 0, 0, 0,
2109                                            DMA_CCMD_GLOBAL_INVL);
2110         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2111 }
2112
2113 static inline void unlink_domain_info(struct device_domain_info *info)
2114 {
2115         assert_spin_locked(&device_domain_lock);
2116         list_del(&info->link);
2117         list_del(&info->global);
2118         if (info->dev)
2119                 info->dev->archdata.iommu = NULL;
2120 }
2121
2122 static void domain_remove_dev_info(struct dmar_domain *domain)
2123 {
2124         struct device_domain_info *info, *tmp;
2125         unsigned long flags;
2126
2127         spin_lock_irqsave(&device_domain_lock, flags);
2128         list_for_each_entry_safe(info, tmp, &domain->devices, link) {
2129                 unlink_domain_info(info);
2130                 spin_unlock_irqrestore(&device_domain_lock, flags);
2131
2132                 iommu_disable_dev_iotlb(info);
2133                 iommu_detach_dev(info->iommu, info->bus, info->devfn);
2134
2135                 if (domain_type_is_vm(domain)) {
2136                         iommu_detach_dependent_devices(info->iommu, info->dev);
2137                         domain_detach_iommu(domain, info->iommu);
2138                 }
2139
2140                 free_devinfo_mem(info);
2141                 spin_lock_irqsave(&device_domain_lock, flags);
2142         }
2143         spin_unlock_irqrestore(&device_domain_lock, flags);
2144 }
2145
2146 /*
2147  * find_domain
2148  * Note: we use struct device->archdata.iommu stores the info
2149  */
2150 static struct dmar_domain *find_domain(struct device *dev)
2151 {
2152         struct device_domain_info *info;
2153
2154         /* No lock here, assumes no domain exit in normal case */
2155         info = dev->archdata.iommu;
2156         if (info)
2157                 return info->domain;
2158         return NULL;
2159 }
2160
2161 static inline struct device_domain_info *
2162 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2163 {
2164         struct device_domain_info *info;
2165
2166         list_for_each_entry(info, &device_domain_list, global)
2167                 if (info->iommu->segment == segment && info->bus == bus &&
2168                     info->devfn == devfn)
2169                         return info;
2170
2171         return NULL;
2172 }
2173
2174 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2175                                                 int bus, int devfn,
2176                                                 struct device *dev,
2177                                                 struct dmar_domain *domain)
2178 {
2179         struct dmar_domain *found = NULL;
2180         struct device_domain_info *info;
2181         unsigned long flags;
2182
2183         info = alloc_devinfo_mem();
2184         if (!info)
2185                 return NULL;
2186
2187         info->bus = bus;
2188         info->devfn = devfn;
2189         info->dev = dev;
2190         info->domain = domain;
2191         info->iommu = iommu;
2192
2193         spin_lock_irqsave(&device_domain_lock, flags);
2194         if (dev)
2195                 found = find_domain(dev);
2196         else {
2197                 struct device_domain_info *info2;
2198                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2199                 if (info2)
2200                         found = info2->domain;
2201         }
2202         if (found) {
2203                 spin_unlock_irqrestore(&device_domain_lock, flags);
2204                 free_devinfo_mem(info);
2205                 /* Caller must free the original domain */
2206                 return found;
2207         }
2208
2209         list_add(&info->link, &domain->devices);
2210         list_add(&info->global, &device_domain_list);
2211         if (dev)
2212                 dev->archdata.iommu = info;
2213         spin_unlock_irqrestore(&device_domain_lock, flags);
2214
2215         return domain;
2216 }
2217
2218 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2219 {
2220         *(u16 *)opaque = alias;
2221         return 0;
2222 }
2223
2224 /* domain is initialized */
2225 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2226 {
2227         struct dmar_domain *domain, *tmp;
2228         struct intel_iommu *iommu;
2229         struct device_domain_info *info;
2230         u16 dma_alias;
2231         unsigned long flags;
2232         u8 bus, devfn;
2233
2234         domain = find_domain(dev);
2235         if (domain)
2236                 return domain;
2237
2238         iommu = device_to_iommu(dev, &bus, &devfn);
2239         if (!iommu)
2240                 return NULL;
2241
2242         if (dev_is_pci(dev)) {
2243                 struct pci_dev *pdev = to_pci_dev(dev);
2244
2245                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2246
2247                 spin_lock_irqsave(&device_domain_lock, flags);
2248                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2249                                                       PCI_BUS_NUM(dma_alias),
2250                                                       dma_alias & 0xff);
2251                 if (info) {
2252                         iommu = info->iommu;
2253                         domain = info->domain;
2254                 }
2255                 spin_unlock_irqrestore(&device_domain_lock, flags);
2256
2257                 /* DMA alias already has a domain, uses it */
2258                 if (info)
2259                         goto found_domain;
2260         }
2261
2262         /* Allocate and initialize new domain for the device */
2263         domain = alloc_domain(0);
2264         if (!domain)
2265                 return NULL;
2266         domain->id = iommu_attach_domain(domain, iommu);
2267         if (domain->id < 0) {
2268                 free_domain_mem(domain);
2269                 return NULL;
2270         }
2271         domain_attach_iommu(domain, iommu);
2272         if (domain_init(domain, gaw)) {
2273                 domain_exit(domain);
2274                 return NULL;
2275         }
2276
2277         /* register PCI DMA alias device */
2278         if (dev_is_pci(dev)) {
2279                 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2280                                            dma_alias & 0xff, NULL, domain);
2281
2282                 if (!tmp || tmp != domain) {
2283                         domain_exit(domain);
2284                         domain = tmp;
2285                 }
2286
2287                 if (!domain)
2288                         return NULL;
2289         }
2290
2291 found_domain:
2292         tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2293
2294         if (!tmp || tmp != domain) {
2295                 domain_exit(domain);
2296                 domain = tmp;
2297         }
2298
2299         return domain;
2300 }
2301
2302 static int iommu_identity_mapping;
2303 #define IDENTMAP_ALL            1
2304 #define IDENTMAP_GFX            2
2305 #define IDENTMAP_AZALIA         4
2306
2307 static int iommu_domain_identity_map(struct dmar_domain *domain,
2308                                      unsigned long long start,
2309                                      unsigned long long end)
2310 {
2311         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2312         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2313
2314         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2315                           dma_to_mm_pfn(last_vpfn))) {
2316                 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2317                 return -ENOMEM;
2318         }
2319
2320         pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2321                  start, end, domain->id);
2322         /*
2323          * RMRR range might have overlap with physical memory range,
2324          * clear it first
2325          */
2326         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2327
2328         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2329                                   last_vpfn - first_vpfn + 1,
2330                                   DMA_PTE_READ|DMA_PTE_WRITE);
2331 }
2332
2333 static int iommu_prepare_identity_map(struct device *dev,
2334                                       unsigned long long start,
2335                                       unsigned long long end)
2336 {
2337         struct dmar_domain *domain;
2338         int ret;
2339
2340         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2341         if (!domain)
2342                 return -ENOMEM;
2343
2344         /* For _hardware_ passthrough, don't bother. But for software
2345            passthrough, we do it anyway -- it may indicate a memory
2346            range which is reserved in E820, so which didn't get set
2347            up to start with in si_domain */
2348         if (domain == si_domain && hw_pass_through) {
2349                 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2350                        dev_name(dev), start, end);
2351                 return 0;
2352         }
2353
2354         printk(KERN_INFO
2355                "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2356                dev_name(dev), start, end);
2357         
2358         if (end < start) {
2359                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2360                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2361                         dmi_get_system_info(DMI_BIOS_VENDOR),
2362                         dmi_get_system_info(DMI_BIOS_VERSION),
2363                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2364                 ret = -EIO;
2365                 goto error;
2366         }
2367
2368         if (end >> agaw_to_width(domain->agaw)) {
2369                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2370                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2371                      agaw_to_width(domain->agaw),
2372                      dmi_get_system_info(DMI_BIOS_VENDOR),
2373                      dmi_get_system_info(DMI_BIOS_VERSION),
2374                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2375                 ret = -EIO;
2376                 goto error;
2377         }
2378
2379         ret = iommu_domain_identity_map(domain, start, end);
2380         if (ret)
2381                 goto error;
2382
2383         /* context entry init */
2384         ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2385         if (ret)
2386                 goto error;
2387
2388         return 0;
2389
2390  error:
2391         domain_exit(domain);
2392         return ret;
2393 }
2394
2395 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2396                                          struct device *dev)
2397 {
2398         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2399                 return 0;
2400         return iommu_prepare_identity_map(dev, rmrr->base_address,
2401                                           rmrr->end_address);
2402 }
2403
2404 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2405 static inline void iommu_prepare_isa(void)
2406 {
2407         struct pci_dev *pdev;
2408         int ret;
2409
2410         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2411         if (!pdev)
2412                 return;
2413
2414         printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2415         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2416
2417         if (ret)
2418                 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2419                        "floppy might not work\n");
2420
2421         pci_dev_put(pdev);
2422 }
2423 #else
2424 static inline void iommu_prepare_isa(void)
2425 {
2426         return;
2427 }
2428 #endif /* !CONFIG_INTEL_IOMMU_FLPY_WA */
2429
2430 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2431
2432 static int __init si_domain_init(int hw)
2433 {
2434         struct dmar_drhd_unit *drhd;
2435         struct intel_iommu *iommu;
2436         int nid, ret = 0;
2437         bool first = true;
2438
2439         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2440         if (!si_domain)
2441                 return -EFAULT;
2442
2443         for_each_active_iommu(iommu, drhd) {
2444                 ret = iommu_attach_domain(si_domain, iommu);
2445                 if (ret < 0) {
2446                         domain_exit(si_domain);
2447                         return -EFAULT;
2448                 } else if (first) {
2449                         si_domain->id = ret;
2450                         first = false;
2451                 } else if (si_domain->id != ret) {
2452                         domain_exit(si_domain);
2453                         return -EFAULT;
2454                 }
2455                 domain_attach_iommu(si_domain, iommu);
2456         }
2457
2458         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2459                 domain_exit(si_domain);
2460                 return -EFAULT;
2461         }
2462
2463         pr_debug("IOMMU: identity mapping domain is domain %d\n",
2464                  si_domain->id);
2465
2466         if (hw)
2467                 return 0;
2468
2469         for_each_online_node(nid) {
2470                 unsigned long start_pfn, end_pfn;
2471                 int i;
2472
2473                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2474                         ret = iommu_domain_identity_map(si_domain,
2475                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2476                         if (ret)
2477                                 return ret;
2478                 }
2479         }
2480
2481         return 0;
2482 }
2483
2484 static int identity_mapping(struct device *dev)
2485 {
2486         struct device_domain_info *info;
2487
2488         if (likely(!iommu_identity_mapping))
2489                 return 0;
2490
2491         info = dev->archdata.iommu;
2492         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2493                 return (info->domain == si_domain);
2494
2495         return 0;
2496 }
2497
2498 static int domain_add_dev_info(struct dmar_domain *domain,
2499                                struct device *dev, int translation)
2500 {
2501         struct dmar_domain *ndomain;
2502         struct intel_iommu *iommu;
2503         u8 bus, devfn;
2504         int ret;
2505
2506         iommu = device_to_iommu(dev, &bus, &devfn);
2507         if (!iommu)
2508                 return -ENODEV;
2509
2510         ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2511         if (ndomain != domain)
2512                 return -EBUSY;
2513
2514         ret = domain_context_mapping(domain, dev, translation);
2515         if (ret) {
2516                 domain_remove_one_dev_info(domain, dev);
2517                 return ret;
2518         }
2519
2520         return 0;
2521 }
2522
2523 static bool device_has_rmrr(struct device *dev)
2524 {
2525         struct dmar_rmrr_unit *rmrr;
2526         struct device *tmp;
2527         int i;
2528
2529         rcu_read_lock();
2530         for_each_rmrr_units(rmrr) {
2531                 /*
2532                  * Return TRUE if this RMRR contains the device that
2533                  * is passed in.
2534                  */
2535                 for_each_active_dev_scope(rmrr->devices,
2536                                           rmrr->devices_cnt, i, tmp)
2537                         if (tmp == dev) {
2538                                 rcu_read_unlock();
2539                                 return true;
2540                         }
2541         }
2542         rcu_read_unlock();
2543         return false;
2544 }
2545
2546 /*
2547  * There are a couple cases where we need to restrict the functionality of
2548  * devices associated with RMRRs.  The first is when evaluating a device for
2549  * identity mapping because problems exist when devices are moved in and out
2550  * of domains and their respective RMRR information is lost.  This means that
2551  * a device with associated RMRRs will never be in a "passthrough" domain.
2552  * The second is use of the device through the IOMMU API.  This interface
2553  * expects to have full control of the IOVA space for the device.  We cannot
2554  * satisfy both the requirement that RMRR access is maintained and have an
2555  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2556  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2557  * We therefore prevent devices associated with an RMRR from participating in
2558  * the IOMMU API, which eliminates them from device assignment.
2559  *
2560  * In both cases we assume that PCI USB devices with RMRRs have them largely
2561  * for historical reasons and that the RMRR space is not actively used post
2562  * boot.  This exclusion may change if vendors begin to abuse it.
2563  */
2564 static bool device_is_rmrr_locked(struct device *dev)
2565 {
2566         if (!device_has_rmrr(dev))
2567                 return false;
2568
2569         if (dev_is_pci(dev)) {
2570                 struct pci_dev *pdev = to_pci_dev(dev);
2571
2572                 if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
2573                         return false;
2574         }
2575
2576         return true;
2577 }
2578
2579 static int iommu_should_identity_map(struct device *dev, int startup)
2580 {
2581
2582         if (dev_is_pci(dev)) {
2583                 struct pci_dev *pdev = to_pci_dev(dev);
2584
2585                 if (device_is_rmrr_locked(dev))
2586                         return 0;
2587
2588                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2589                         return 1;
2590
2591                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2592                         return 1;
2593
2594                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2595                         return 0;
2596
2597                 /*
2598                  * We want to start off with all devices in the 1:1 domain, and
2599                  * take them out later if we find they can't access all of memory.
2600                  *
2601                  * However, we can't do this for PCI devices behind bridges,
2602                  * because all PCI devices behind the same bridge will end up
2603                  * with the same source-id on their transactions.
2604                  *
2605                  * Practically speaking, we can't change things around for these
2606                  * devices at run-time, because we can't be sure there'll be no
2607                  * DMA transactions in flight for any of their siblings.
2608                  *
2609                  * So PCI devices (unless they're on the root bus) as well as
2610                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2611                  * the 1:1 domain, just in _case_ one of their siblings turns out
2612                  * not to be able to map all of memory.
2613                  */
2614                 if (!pci_is_pcie(pdev)) {
2615                         if (!pci_is_root_bus(pdev->bus))
2616                                 return 0;
2617                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2618                                 return 0;
2619                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2620                         return 0;
2621         } else {
2622                 if (device_has_rmrr(dev))
2623                         return 0;
2624         }
2625
2626         /*
2627          * At boot time, we don't yet know if devices will be 64-bit capable.
2628          * Assume that they will — if they turn out not to be, then we can
2629          * take them out of the 1:1 domain later.
2630          */
2631         if (!startup) {
2632                 /*
2633                  * If the device's dma_mask is less than the system's memory
2634                  * size then this is not a candidate for identity mapping.
2635                  */
2636                 u64 dma_mask = *dev->dma_mask;
2637
2638                 if (dev->coherent_dma_mask &&
2639                     dev->coherent_dma_mask < dma_mask)
2640                         dma_mask = dev->coherent_dma_mask;
2641
2642                 return dma_mask >= dma_get_required_mask(dev);
2643         }
2644
2645         return 1;
2646 }
2647
2648 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2649 {
2650         int ret;
2651
2652         if (!iommu_should_identity_map(dev, 1))
2653                 return 0;
2654
2655         ret = domain_add_dev_info(si_domain, dev,
2656                                   hw ? CONTEXT_TT_PASS_THROUGH :
2657                                        CONTEXT_TT_MULTI_LEVEL);
2658         if (!ret)
2659                 pr_info("IOMMU: %s identity mapping for device %s\n",
2660                         hw ? "hardware" : "software", dev_name(dev));
2661         else if (ret == -ENODEV)
2662                 /* device not associated with an iommu */
2663                 ret = 0;
2664
2665         return ret;
2666 }
2667
2668
2669 static int __init iommu_prepare_static_identity_mapping(int hw)
2670 {
2671         struct pci_dev *pdev = NULL;
2672         struct dmar_drhd_unit *drhd;
2673         struct intel_iommu *iommu;
2674         struct device *dev;
2675         int i;
2676         int ret = 0;
2677
2678         ret = si_domain_init(hw);
2679         if (ret)
2680                 return -EFAULT;
2681
2682         for_each_pci_dev(pdev) {
2683                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2684                 if (ret)
2685                         return ret;
2686         }
2687
2688         for_each_active_iommu(iommu, drhd)
2689                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2690                         struct acpi_device_physical_node *pn;
2691                         struct acpi_device *adev;
2692
2693                         if (dev->bus != &acpi_bus_type)
2694                                 continue;
2695                                 
2696                         adev= to_acpi_device(dev);
2697                         mutex_lock(&adev->physical_node_lock);
2698                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2699                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2700                                 if (ret)
2701                                         break;
2702                         }
2703                         mutex_unlock(&adev->physical_node_lock);
2704                         if (ret)
2705                                 return ret;
2706                 }
2707
2708         return 0;
2709 }
2710
2711 static int __init init_dmars(void)
2712 {
2713         struct dmar_drhd_unit *drhd;
2714         struct dmar_rmrr_unit *rmrr;
2715         struct device *dev;
2716         struct intel_iommu *iommu;
2717         int i, ret;
2718
2719         /*
2720          * for each drhd
2721          *    allocate root
2722          *    initialize and program root entry to not present
2723          * endfor
2724          */
2725         for_each_drhd_unit(drhd) {
2726                 /*
2727                  * lock not needed as this is only incremented in the single
2728                  * threaded kernel __init code path all other access are read
2729                  * only
2730                  */
2731                 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2732                         g_num_of_iommus++;
2733                         continue;
2734                 }
2735                 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2736                           IOMMU_UNITS_SUPPORTED);
2737         }
2738
2739         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2740                         GFP_KERNEL);
2741         if (!g_iommus) {
2742                 printk(KERN_ERR "Allocating global iommu array failed\n");
2743                 ret = -ENOMEM;
2744                 goto error;
2745         }
2746
2747         deferred_flush = kzalloc(g_num_of_iommus *
2748                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2749         if (!deferred_flush) {
2750                 ret = -ENOMEM;
2751                 goto free_g_iommus;
2752         }
2753
2754         for_each_active_iommu(iommu, drhd) {
2755                 g_iommus[iommu->seq_id] = iommu;
2756
2757                 ret = iommu_init_domains(iommu);
2758                 if (ret)
2759                         goto free_iommu;
2760
2761                 /*
2762                  * TBD:
2763                  * we could share the same root & context tables
2764                  * among all IOMMU's. Need to Split it later.
2765                  */
2766                 ret = iommu_alloc_root_entry(iommu);
2767                 if (ret) {
2768                         printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2769                         goto free_iommu;
2770                 }
2771                 if (!ecap_pass_through(iommu->ecap))
2772                         hw_pass_through = 0;
2773         }
2774
2775         /*
2776          * Start from the sane iommu hardware state.
2777          */
2778         for_each_active_iommu(iommu, drhd) {
2779                 /*
2780                  * If the queued invalidation is already initialized by us
2781                  * (for example, while enabling interrupt-remapping) then
2782                  * we got the things already rolling from a sane state.
2783                  */
2784                 if (iommu->qi)
2785                         continue;
2786
2787                 /*
2788                  * Clear any previous faults.
2789                  */
2790                 dmar_fault(-1, iommu);
2791                 /*
2792                  * Disable queued invalidation if supported and already enabled
2793                  * before OS handover.
2794                  */
2795                 dmar_disable_qi(iommu);
2796         }
2797
2798         for_each_active_iommu(iommu, drhd) {
2799                 if (dmar_enable_qi(iommu)) {
2800                         /*
2801                          * Queued Invalidate not enabled, use Register Based
2802                          * Invalidate
2803                          */
2804                         iommu->flush.flush_context = __iommu_flush_context;
2805                         iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2806                         printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2807                                "invalidation\n",
2808                                 iommu->seq_id,
2809                                (unsigned long long)drhd->reg_base_addr);
2810                 } else {
2811                         iommu->flush.flush_context = qi_flush_context;
2812                         iommu->flush.flush_iotlb = qi_flush_iotlb;
2813                         printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2814                                "invalidation\n",
2815                                 iommu->seq_id,
2816                                (unsigned long long)drhd->reg_base_addr);
2817                 }
2818         }
2819
2820         if (iommu_pass_through)
2821                 iommu_identity_mapping |= IDENTMAP_ALL;
2822
2823 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2824         iommu_identity_mapping |= IDENTMAP_GFX;
2825 #endif
2826
2827         check_tylersburg_isoch();
2828
2829         /*
2830          * If pass through is not set or not enabled, setup context entries for
2831          * identity mappings for rmrr, gfx, and isa and may fall back to static
2832          * identity mapping if iommu_identity_mapping is set.
2833          */
2834         if (iommu_identity_mapping) {
2835                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2836                 if (ret) {
2837                         printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2838                         goto free_iommu;
2839                 }
2840         }
2841         /*
2842          * For each rmrr
2843          *   for each dev attached to rmrr
2844          *   do
2845          *     locate drhd for dev, alloc domain for dev
2846          *     allocate free domain
2847          *     allocate page table entries for rmrr
2848          *     if context not allocated for bus
2849          *           allocate and init context
2850          *           set present in root table for this bus
2851          *     init context with domain, translation etc
2852          *    endfor
2853          * endfor
2854          */
2855         printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2856         for_each_rmrr_units(rmrr) {
2857                 /* some BIOS lists non-exist devices in DMAR table. */
2858                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2859                                           i, dev) {
2860                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
2861                         if (ret)
2862                                 printk(KERN_ERR
2863                                        "IOMMU: mapping reserved region failed\n");
2864                 }
2865         }
2866
2867         iommu_prepare_isa();
2868
2869         /*
2870          * for each drhd
2871          *   enable fault log
2872          *   global invalidate context cache
2873          *   global invalidate iotlb
2874          *   enable translation
2875          */
2876         for_each_iommu(iommu, drhd) {
2877                 if (drhd->ignored) {
2878                         /*
2879                          * we always have to disable PMRs or DMA may fail on
2880                          * this device
2881                          */
2882                         if (force_on)
2883                                 iommu_disable_protect_mem_regions(iommu);
2884                         continue;
2885                 }
2886
2887                 iommu_flush_write_buffer(iommu);
2888
2889                 ret = dmar_set_interrupt(iommu);
2890                 if (ret)
2891                         goto free_iommu;
2892
2893                 iommu_set_root_entry(iommu);
2894
2895                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2896                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2897                 iommu_enable_translation(iommu);
2898                 iommu_disable_protect_mem_regions(iommu);
2899         }
2900
2901         return 0;
2902
2903 free_iommu:
2904         for_each_active_iommu(iommu, drhd)
2905                 free_dmar_iommu(iommu);
2906         kfree(deferred_flush);
2907 free_g_iommus:
2908         kfree(g_iommus);
2909 error:
2910         return ret;
2911 }
2912
2913 /* This takes a number of _MM_ pages, not VTD pages */
2914 static struct iova *intel_alloc_iova(struct device *dev,
2915                                      struct dmar_domain *domain,
2916                                      unsigned long nrpages, uint64_t dma_mask)
2917 {
2918         struct iova *iova = NULL;
2919
2920         /* Restrict dma_mask to the width that the iommu can handle */
2921         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2922
2923         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2924                 /*
2925                  * First try to allocate an io virtual address in
2926                  * DMA_BIT_MASK(32) and if that fails then try allocating
2927                  * from higher range
2928                  */
2929                 iova = alloc_iova(&domain->iovad, nrpages,
2930                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
2931                 if (iova)
2932                         return iova;
2933         }
2934         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2935         if (unlikely(!iova)) {
2936                 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2937                        nrpages, dev_name(dev));
2938                 return NULL;
2939         }
2940
2941         return iova;
2942 }
2943
2944 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
2945 {
2946         struct dmar_domain *domain;
2947         int ret;
2948
2949         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2950         if (!domain) {
2951                 printk(KERN_ERR "Allocating domain for %s failed",
2952                        dev_name(dev));
2953                 return NULL;
2954         }
2955
2956         /* make sure context mapping is ok */
2957         if (unlikely(!domain_context_mapped(dev))) {
2958                 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2959                 if (ret) {
2960                         printk(KERN_ERR "Domain context map for %s failed",
2961                                dev_name(dev));
2962                         return NULL;
2963                 }
2964         }
2965
2966         return domain;
2967 }
2968
2969 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
2970 {
2971         struct device_domain_info *info;
2972
2973         /* No lock here, assumes no domain exit in normal case */
2974         info = dev->archdata.iommu;
2975         if (likely(info))
2976                 return info->domain;
2977
2978         return __get_valid_domain_for_dev(dev);
2979 }
2980
2981 static int iommu_dummy(struct device *dev)
2982 {
2983         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2984 }
2985
2986 /* Check if the dev needs to go through non-identity map and unmap process.*/
2987 static int iommu_no_mapping(struct device *dev)
2988 {
2989         int found;
2990
2991         if (iommu_dummy(dev))
2992                 return 1;
2993
2994         if (!iommu_identity_mapping)
2995                 return 0;
2996
2997         found = identity_mapping(dev);
2998         if (found) {
2999                 if (iommu_should_identity_map(dev, 0))
3000                         return 1;
3001                 else {
3002                         /*
3003                          * 32 bit DMA is removed from si_domain and fall back
3004                          * to non-identity mapping.
3005                          */
3006                         domain_remove_one_dev_info(si_domain, dev);
3007                         printk(KERN_INFO "32bit %s uses non-identity mapping\n",
3008                                dev_name(dev));
3009                         return 0;
3010                 }
3011         } else {
3012                 /*
3013                  * In case of a detached 64 bit DMA device from vm, the device
3014                  * is put into si_domain for identity mapping.
3015                  */
3016                 if (iommu_should_identity_map(dev, 0)) {
3017                         int ret;
3018                         ret = domain_add_dev_info(si_domain, dev,
3019                                                   hw_pass_through ?
3020                                                   CONTEXT_TT_PASS_THROUGH :
3021                                                   CONTEXT_TT_MULTI_LEVEL);
3022                         if (!ret) {
3023                                 printk(KERN_INFO "64bit %s uses identity mapping\n",
3024                                        dev_name(dev));
3025                                 return 1;
3026                         }
3027                 }
3028         }
3029
3030         return 0;
3031 }
3032
3033 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3034                                      size_t size, int dir, u64 dma_mask)
3035 {
3036         struct dmar_domain *domain;
3037         phys_addr_t start_paddr;
3038         struct iova *iova;
3039         int prot = 0;
3040         int ret;
3041         struct intel_iommu *iommu;
3042         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3043
3044         BUG_ON(dir == DMA_NONE);
3045
3046         if (iommu_no_mapping(dev))
3047                 return paddr;
3048
3049         domain = get_valid_domain_for_dev(dev);
3050         if (!domain)
3051                 return 0;
3052
3053         iommu = domain_get_iommu(domain);
3054         size = aligned_nrpages(paddr, size);
3055
3056         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3057         if (!iova)
3058                 goto error;
3059
3060         /*
3061          * Check if DMAR supports zero-length reads on write only
3062          * mappings..
3063          */
3064         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3065                         !cap_zlr(iommu->cap))
3066                 prot |= DMA_PTE_READ;
3067         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3068                 prot |= DMA_PTE_WRITE;
3069         /*
3070          * paddr - (paddr + size) might be partial page, we should map the whole
3071          * page.  Note: if two part of one page are separately mapped, we
3072          * might have two guest_addr mapping to the same host paddr, but this
3073          * is not a big problem
3074          */
3075         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3076                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3077         if (ret)
3078                 goto error;
3079
3080         /* it's a non-present to present mapping. Only flush if caching mode */
3081         if (cap_caching_mode(iommu->cap))
3082                 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
3083         else
3084                 iommu_flush_write_buffer(iommu);
3085
3086         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3087         start_paddr += paddr & ~PAGE_MASK;
3088         return start_paddr;
3089
3090 error:
3091         if (iova)
3092                 __free_iova(&domain->iovad, iova);
3093         printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
3094                 dev_name(dev), size, (unsigned long long)paddr, dir);
3095         return 0;
3096 }
3097
3098 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3099                                  unsigned long offset, size_t size,
3100                                  enum dma_data_direction dir,
3101                                  struct dma_attrs *attrs)
3102 {
3103         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3104                                   dir, *dev->dma_mask);
3105 }
3106
3107 static void flush_unmaps(void)
3108 {
3109         int i, j;
3110
3111         timer_on = 0;
3112
3113         /* just flush them all */
3114         for (i = 0; i < g_num_of_iommus; i++) {
3115                 struct intel_iommu *iommu = g_iommus[i];
3116                 if (!iommu)
3117                         continue;
3118
3119                 if (!deferred_flush[i].next)
3120                         continue;
3121
3122                 /* In caching mode, global flushes turn emulation expensive */
3123                 if (!cap_caching_mode(iommu->cap))
3124                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3125                                          DMA_TLB_GLOBAL_FLUSH);
3126                 for (j = 0; j < deferred_flush[i].next; j++) {
3127                         unsigned long mask;
3128                         struct iova *iova = deferred_flush[i].iova[j];
3129                         struct dmar_domain *domain = deferred_flush[i].domain[j];
3130
3131                         /* On real hardware multiple invalidations are expensive */
3132                         if (cap_caching_mode(iommu->cap))
3133                                 iommu_flush_iotlb_psi(iommu, domain->id,
3134                                         iova->pfn_lo, iova_size(iova),
3135                                         !deferred_flush[i].freelist[j], 0);
3136                         else {
3137                                 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3138                                 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3139                                                 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3140                         }
3141                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3142                         if (deferred_flush[i].freelist[j])
3143                                 dma_free_pagelist(deferred_flush[i].freelist[j]);
3144                 }
3145                 deferred_flush[i].next = 0;
3146         }
3147
3148         list_size = 0;
3149 }
3150
3151 static void flush_unmaps_timeout(unsigned long data)
3152 {
3153         unsigned long flags;
3154
3155         spin_lock_irqsave(&async_umap_flush_lock, flags);
3156         flush_unmaps();
3157         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3158 }
3159
3160 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3161 {
3162         unsigned long flags;
3163         int next, iommu_id;
3164         struct intel_iommu *iommu;
3165
3166         spin_lock_irqsave(&async_umap_flush_lock, flags);
3167         if (list_size == HIGH_WATER_MARK)
3168                 flush_unmaps();
3169
3170         iommu = domain_get_iommu(dom);
3171         iommu_id = iommu->seq_id;
3172
3173         next = deferred_flush[iommu_id].next;
3174         deferred_flush[iommu_id].domain[next] = dom;
3175         deferred_flush[iommu_id].iova[next] = iova;
3176         deferred_flush[iommu_id].freelist[next] = freelist;
3177         deferred_flush[iommu_id].next++;
3178
3179         if (!timer_on) {
3180                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3181                 timer_on = 1;
3182         }
3183         list_size++;
3184         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3185 }
3186
3187 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3188 {
3189         struct dmar_domain *domain;
3190         unsigned long start_pfn, last_pfn;
3191         struct iova *iova;
3192         struct intel_iommu *iommu;
3193         struct page *freelist;
3194
3195         if (iommu_no_mapping(dev))
3196                 return;
3197
3198         domain = find_domain(dev);
3199         BUG_ON(!domain);
3200
3201         iommu = domain_get_iommu(domain);
3202
3203         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3204         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3205                       (unsigned long long)dev_addr))
3206                 return;
3207
3208         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3209         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3210
3211         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3212                  dev_name(dev), start_pfn, last_pfn);
3213
3214         freelist = domain_unmap(domain, start_pfn, last_pfn);
3215
3216         if (intel_iommu_strict) {
3217                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3218                                       last_pfn - start_pfn + 1, !freelist, 0);
3219                 /* free iova */
3220                 __free_iova(&domain->iovad, iova);
3221                 dma_free_pagelist(freelist);
3222         } else {
3223                 add_unmap(domain, iova, freelist);
3224                 /*
3225                  * queue up the release of the unmap to save the 1/6th of the
3226                  * cpu used up by the iotlb flush operation...
3227                  */
3228         }
3229 }
3230
3231 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3232                              size_t size, enum dma_data_direction dir,
3233                              struct dma_attrs *attrs)
3234 {
3235         intel_unmap(dev, dev_addr);
3236 }
3237
3238 static void *intel_alloc_coherent(struct device *dev, size_t size,
3239                                   dma_addr_t *dma_handle, gfp_t flags,
3240                                   struct dma_attrs *attrs)
3241 {
3242         struct page *page = NULL;
3243         int order;
3244
3245         size = PAGE_ALIGN(size);
3246         order = get_order(size);
3247
3248         if (!iommu_no_mapping(dev))
3249                 flags &= ~(GFP_DMA | GFP_DMA32);
3250         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3251                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3252                         flags |= GFP_DMA;
3253                 else
3254                         flags |= GFP_DMA32;
3255         }
3256
3257         if (flags & __GFP_WAIT) {
3258                 unsigned int count = size >> PAGE_SHIFT;
3259
3260                 page = dma_alloc_from_contiguous(dev, count, order);
3261                 if (page && iommu_no_mapping(dev) &&
3262                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3263                         dma_release_from_contiguous(dev, page, count);
3264                         page = NULL;
3265                 }
3266         }
3267
3268         if (!page)
3269                 page = alloc_pages(flags, order);
3270         if (!page)
3271                 return NULL;
3272         memset(page_address(page), 0, size);
3273
3274         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3275                                          DMA_BIDIRECTIONAL,
3276                                          dev->coherent_dma_mask);
3277         if (*dma_handle)
3278                 return page_address(page);
3279         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3280                 __free_pages(page, order);
3281
3282         return NULL;
3283 }
3284
3285 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3286                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
3287 {
3288         int order;
3289         struct page *page = virt_to_page(vaddr);
3290
3291         size = PAGE_ALIGN(size);
3292         order = get_order(size);
3293
3294         intel_unmap(dev, dma_handle);
3295         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3296                 __free_pages(page, order);
3297 }
3298
3299 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3300                            int nelems, enum dma_data_direction dir,
3301                            struct dma_attrs *attrs)
3302 {
3303         intel_unmap(dev, sglist[0].dma_address);
3304 }
3305
3306 static int intel_nontranslate_map_sg(struct device *hddev,
3307         struct scatterlist *sglist, int nelems, int dir)
3308 {
3309         int i;
3310         struct scatterlist *sg;
3311
3312         for_each_sg(sglist, sg, nelems, i) {
3313                 BUG_ON(!sg_page(sg));
3314                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3315                 sg->dma_length = sg->length;
3316         }
3317         return nelems;
3318 }
3319
3320 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3321                         enum dma_data_direction dir, struct dma_attrs *attrs)
3322 {
3323         int i;
3324         struct dmar_domain *domain;
3325         size_t size = 0;
3326         int prot = 0;
3327         struct iova *iova = NULL;
3328         int ret;
3329         struct scatterlist *sg;
3330         unsigned long start_vpfn;
3331         struct intel_iommu *iommu;
3332
3333         BUG_ON(dir == DMA_NONE);
3334         if (iommu_no_mapping(dev))
3335                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3336
3337         domain = get_valid_domain_for_dev(dev);
3338         if (!domain)
3339                 return 0;
3340
3341         iommu = domain_get_iommu(domain);
3342
3343         for_each_sg(sglist, sg, nelems, i)
3344                 size += aligned_nrpages(sg->offset, sg->length);
3345
3346         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3347                                 *dev->dma_mask);
3348         if (!iova) {
3349                 sglist->dma_length = 0;
3350                 return 0;
3351         }
3352
3353         /*
3354          * Check if DMAR supports zero-length reads on write only
3355          * mappings..
3356          */
3357         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3358                         !cap_zlr(iommu->cap))
3359                 prot |= DMA_PTE_READ;
3360         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3361                 prot |= DMA_PTE_WRITE;
3362
3363         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3364
3365         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3366         if (unlikely(ret)) {
3367                 dma_pte_free_pagetable(domain, start_vpfn,
3368                                        start_vpfn + size - 1);
3369                 __free_iova(&domain->iovad, iova);
3370                 return 0;
3371         }
3372
3373         /* it's a non-present to present mapping. Only flush if caching mode */
3374         if (cap_caching_mode(iommu->cap))
3375                 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
3376         else
3377                 iommu_flush_write_buffer(iommu);
3378
3379         return nelems;
3380 }
3381
3382 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3383 {
3384         return !dma_addr;
3385 }
3386
3387 struct dma_map_ops intel_dma_ops = {
3388         .alloc = intel_alloc_coherent,
3389         .free = intel_free_coherent,
3390         .map_sg = intel_map_sg,
3391         .unmap_sg = intel_unmap_sg,
3392         .map_page = intel_map_page,
3393         .unmap_page = intel_unmap_page,
3394         .mapping_error = intel_mapping_error,
3395 };
3396
3397 static inline int iommu_domain_cache_init(void)
3398 {
3399         int ret = 0;
3400
3401         iommu_domain_cache = kmem_cache_create("iommu_domain",
3402                                          sizeof(struct dmar_domain),
3403                                          0,
3404                                          SLAB_HWCACHE_ALIGN,
3405
3406                                          NULL);
3407         if (!iommu_domain_cache) {
3408                 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3409                 ret = -ENOMEM;
3410         }
3411
3412         return ret;
3413 }
3414
3415 static inline int iommu_devinfo_cache_init(void)
3416 {
3417         int ret = 0;
3418
3419         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3420                                          sizeof(struct device_domain_info),
3421                                          0,
3422                                          SLAB_HWCACHE_ALIGN,
3423                                          NULL);
3424         if (!iommu_devinfo_cache) {
3425                 printk(KERN_ERR "Couldn't create devinfo cache\n");
3426                 ret = -ENOMEM;
3427         }
3428
3429         return ret;
3430 }
3431
3432 static inline int iommu_iova_cache_init(void)
3433 {
3434         int ret = 0;
3435
3436         iommu_iova_cache = kmem_cache_create("iommu_iova",
3437                                          sizeof(struct iova),
3438                                          0,
3439                                          SLAB_HWCACHE_ALIGN,
3440                                          NULL);
3441         if (!iommu_iova_cache) {
3442                 printk(KERN_ERR "Couldn't create iova cache\n");
3443                 ret = -ENOMEM;
3444         }
3445
3446         return ret;
3447 }
3448
3449 static int __init iommu_init_mempool(void)
3450 {
3451         int ret;
3452         ret = iommu_iova_cache_init();
3453         if (ret)
3454                 return ret;
3455
3456         ret = iommu_domain_cache_init();
3457         if (ret)
3458                 goto domain_error;
3459
3460         ret = iommu_devinfo_cache_init();
3461         if (!ret)
3462                 return ret;
3463
3464         kmem_cache_destroy(iommu_domain_cache);
3465 domain_error:
3466         kmem_cache_destroy(iommu_iova_cache);
3467
3468         return -ENOMEM;
3469 }
3470
3471 static void __init iommu_exit_mempool(void)
3472 {
3473         kmem_cache_destroy(iommu_devinfo_cache);
3474         kmem_cache_destroy(iommu_domain_cache);
3475         kmem_cache_destroy(iommu_iova_cache);
3476
3477 }
3478
3479 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3480 {
3481         struct dmar_drhd_unit *drhd;
3482         u32 vtbar;
3483         int rc;
3484
3485         /* We know that this device on this chipset has its own IOMMU.
3486          * If we find it under a different IOMMU, then the BIOS is lying
3487          * to us. Hope that the IOMMU for this device is actually
3488          * disabled, and it needs no translation...
3489          */
3490         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3491         if (rc) {
3492                 /* "can't" happen */
3493                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3494                 return;
3495         }
3496         vtbar &= 0xffff0000;
3497
3498         /* we know that the this iommu should be at offset 0xa000 from vtbar */
3499         drhd = dmar_find_matched_drhd_unit(pdev);
3500         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3501                             TAINT_FIRMWARE_WORKAROUND,
3502                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3503                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3504 }
3505 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3506
3507 static void __init init_no_remapping_devices(void)
3508 {
3509         struct dmar_drhd_unit *drhd;
3510         struct device *dev;
3511         int i;
3512
3513         for_each_drhd_unit(drhd) {
3514                 if (!drhd->include_all) {
3515                         for_each_active_dev_scope(drhd->devices,
3516                                                   drhd->devices_cnt, i, dev)
3517                                 break;
3518                         /* ignore DMAR unit if no devices exist */
3519                         if (i == drhd->devices_cnt)
3520                                 drhd->ignored = 1;
3521                 }
3522         }
3523
3524         for_each_active_drhd_unit(drhd) {
3525                 if (drhd->include_all)
3526                         continue;
3527
3528                 for_each_active_dev_scope(drhd->devices,
3529                                           drhd->devices_cnt, i, dev)
3530                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3531                                 break;
3532                 if (i < drhd->devices_cnt)
3533                         continue;
3534
3535                 /* This IOMMU has *only* gfx devices. Either bypass it or
3536                    set the gfx_mapped flag, as appropriate */
3537                 if (dmar_map_gfx) {
3538                         intel_iommu_gfx_mapped = 1;
3539                 } else {
3540                         drhd->ignored = 1;
3541                         for_each_active_dev_scope(drhd->devices,
3542                                                   drhd->devices_cnt, i, dev)
3543                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3544                 }
3545         }
3546 }
3547
3548 #ifdef CONFIG_SUSPEND
3549 static int init_iommu_hw(void)
3550 {
3551         struct dmar_drhd_unit *drhd;
3552         struct intel_iommu *iommu = NULL;
3553
3554         for_each_active_iommu(iommu, drhd)
3555                 if (iommu->qi)
3556                         dmar_reenable_qi(iommu);
3557
3558         for_each_iommu(iommu, drhd) {
3559                 if (drhd->ignored) {
3560                         /*
3561                          * we always have to disable PMRs or DMA may fail on
3562                          * this device
3563                          */
3564                         if (force_on)
3565                                 iommu_disable_protect_mem_regions(iommu);
3566                         continue;
3567                 }
3568         
3569                 iommu_flush_write_buffer(iommu);
3570
3571                 iommu_set_root_entry(iommu);
3572
3573                 iommu->flush.flush_context(iommu, 0, 0, 0,
3574                                            DMA_CCMD_GLOBAL_INVL);
3575                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3576                 iommu_enable_translation(iommu);
3577                 iommu_disable_protect_mem_regions(iommu);
3578         }
3579
3580         return 0;
3581 }
3582
3583 static void iommu_flush_all(void)
3584 {
3585         struct dmar_drhd_unit *drhd;
3586         struct intel_iommu *iommu;
3587
3588         for_each_active_iommu(iommu, drhd) {
3589                 iommu->flush.flush_context(iommu, 0, 0, 0,
3590                                            DMA_CCMD_GLOBAL_INVL);
3591                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3592                                          DMA_TLB_GLOBAL_FLUSH);
3593         }
3594 }
3595
3596 static int iommu_suspend(void)
3597 {
3598         struct dmar_drhd_unit *drhd;
3599         struct intel_iommu *iommu = NULL;
3600         unsigned long flag;
3601
3602         for_each_active_iommu(iommu, drhd) {
3603                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3604                                                  GFP_ATOMIC);
3605                 if (!iommu->iommu_state)
3606                         goto nomem;
3607         }
3608
3609         iommu_flush_all();
3610
3611         for_each_active_iommu(iommu, drhd) {
3612                 iommu_disable_translation(iommu);
3613
3614                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3615
3616                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3617                         readl(iommu->reg + DMAR_FECTL_REG);
3618                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3619                         readl(iommu->reg + DMAR_FEDATA_REG);
3620                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3621                         readl(iommu->reg + DMAR_FEADDR_REG);
3622                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3623                         readl(iommu->reg + DMAR_FEUADDR_REG);
3624
3625                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3626         }
3627         return 0;
3628
3629 nomem:
3630         for_each_active_iommu(iommu, drhd)
3631                 kfree(iommu->iommu_state);
3632
3633         return -ENOMEM;
3634 }
3635
3636 static void iommu_resume(void)
3637 {
3638         struct dmar_drhd_unit *drhd;
3639         struct intel_iommu *iommu = NULL;
3640         unsigned long flag;
3641
3642         if (init_iommu_hw()) {
3643                 if (force_on)
3644                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3645                 else
3646                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3647                 return;
3648         }
3649
3650         for_each_active_iommu(iommu, drhd) {
3651
3652                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3653
3654                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3655                         iommu->reg + DMAR_FECTL_REG);
3656                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3657                         iommu->reg + DMAR_FEDATA_REG);
3658                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3659                         iommu->reg + DMAR_FEADDR_REG);
3660                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3661                         iommu->reg + DMAR_FEUADDR_REG);
3662
3663                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3664         }
3665
3666         for_each_active_iommu(iommu, drhd)
3667                 kfree(iommu->iommu_state);
3668 }
3669
3670 static struct syscore_ops iommu_syscore_ops = {
3671         .resume         = iommu_resume,
3672         .suspend        = iommu_suspend,
3673 };
3674
3675 static void __init init_iommu_pm_ops(void)
3676 {
3677         register_syscore_ops(&iommu_syscore_ops);
3678 }
3679
3680 #else
3681 static inline void init_iommu_pm_ops(void) {}
3682 #endif  /* CONFIG_PM */
3683
3684
3685 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3686 {
3687         struct acpi_dmar_reserved_memory *rmrr;
3688         struct dmar_rmrr_unit *rmrru;
3689
3690         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3691         if (!rmrru)
3692                 return -ENOMEM;
3693
3694         rmrru->hdr = header;
3695         rmrr = (struct acpi_dmar_reserved_memory *)header;
3696         rmrru->base_address = rmrr->base_address;
3697         rmrru->end_address = rmrr->end_address;
3698         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3699                                 ((void *)rmrr) + rmrr->header.length,
3700                                 &rmrru->devices_cnt);
3701         if (rmrru->devices_cnt && rmrru->devices == NULL) {
3702                 kfree(rmrru);
3703                 return -ENOMEM;
3704         }
3705
3706         list_add(&rmrru->list, &dmar_rmrr_units);
3707
3708         return 0;
3709 }
3710
3711 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3712 {
3713         struct acpi_dmar_atsr *atsr;
3714         struct dmar_atsr_unit *atsru;
3715
3716         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3717         atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3718         if (!atsru)
3719                 return -ENOMEM;
3720
3721         atsru->hdr = hdr;
3722         atsru->include_all = atsr->flags & 0x1;
3723         if (!atsru->include_all) {
3724                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3725                                 (void *)atsr + atsr->header.length,
3726                                 &atsru->devices_cnt);
3727                 if (atsru->devices_cnt && atsru->devices == NULL) {
3728                         kfree(atsru);
3729                         return -ENOMEM;
3730                 }
3731         }
3732
3733         list_add_rcu(&atsru->list, &dmar_atsr_units);
3734
3735         return 0;
3736 }
3737
3738 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3739 {
3740         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3741         kfree(atsru);
3742 }
3743
3744 static void intel_iommu_free_dmars(void)
3745 {
3746         struct dmar_rmrr_unit *rmrru, *rmrr_n;
3747         struct dmar_atsr_unit *atsru, *atsr_n;
3748
3749         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3750                 list_del(&rmrru->list);
3751                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3752                 kfree(rmrru);
3753         }
3754
3755         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3756                 list_del(&atsru->list);
3757                 intel_iommu_free_atsr(atsru);
3758         }
3759 }
3760
3761 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3762 {
3763         int i, ret = 1;
3764         struct pci_bus *bus;
3765         struct pci_dev *bridge = NULL;
3766         struct device *tmp;
3767         struct acpi_dmar_atsr *atsr;
3768         struct dmar_atsr_unit *atsru;
3769
3770         dev = pci_physfn(dev);
3771         for (bus = dev->bus; bus; bus = bus->parent) {
3772                 bridge = bus->self;
3773                 if (!bridge || !pci_is_pcie(bridge) ||
3774                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3775                         return 0;
3776                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3777                         break;
3778         }
3779         if (!bridge)
3780                 return 0;
3781
3782         rcu_read_lock();
3783         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3784                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3785                 if (atsr->segment != pci_domain_nr(dev->bus))
3786                         continue;
3787
3788                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3789                         if (tmp == &bridge->dev)
3790                                 goto out;
3791
3792                 if (atsru->include_all)
3793                         goto out;
3794         }
3795         ret = 0;
3796 out:
3797         rcu_read_unlock();
3798
3799         return ret;
3800 }
3801
3802 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3803 {
3804         int ret = 0;
3805         struct dmar_rmrr_unit *rmrru;
3806         struct dmar_atsr_unit *atsru;
3807         struct acpi_dmar_atsr *atsr;
3808         struct acpi_dmar_reserved_memory *rmrr;
3809
3810         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3811                 return 0;
3812
3813         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3814                 rmrr = container_of(rmrru->hdr,
3815                                     struct acpi_dmar_reserved_memory, header);
3816                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3817                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3818                                 ((void *)rmrr) + rmrr->header.length,
3819                                 rmrr->segment, rmrru->devices,
3820                                 rmrru->devices_cnt);
3821                         if(ret < 0)
3822                                 return ret;
3823                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3824                         dmar_remove_dev_scope(info, rmrr->segment,
3825                                 rmrru->devices, rmrru->devices_cnt);
3826                 }
3827         }
3828
3829         list_for_each_entry(atsru, &dmar_atsr_units, list) {
3830                 if (atsru->include_all)
3831                         continue;
3832
3833                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3834                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3835                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3836                                         (void *)atsr + atsr->header.length,
3837                                         atsr->segment, atsru->devices,
3838                                         atsru->devices_cnt);
3839                         if (ret > 0)
3840                                 break;
3841                         else if(ret < 0)
3842                                 return ret;
3843                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3844                         if (dmar_remove_dev_scope(info, atsr->segment,
3845                                         atsru->devices, atsru->devices_cnt))
3846                                 break;
3847                 }
3848         }
3849
3850         return 0;
3851 }
3852
3853 /*
3854  * Here we only respond to action of unbound device from driver.
3855  *
3856  * Added device is not attached to its DMAR domain here yet. That will happen
3857  * when mapping the device to iova.
3858  */
3859 static int device_notifier(struct notifier_block *nb,
3860                                   unsigned long action, void *data)
3861 {
3862         struct device *dev = data;
3863         struct dmar_domain *domain;
3864
3865         if (iommu_dummy(dev))
3866                 return 0;
3867
3868         if (action != BUS_NOTIFY_REMOVED_DEVICE)
3869                 return 0;
3870
3871         /*
3872          * If the device is still attached to a device driver we can't
3873          * tear down the domain yet as DMA mappings may still be in use.
3874          * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
3875          */
3876         if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
3877                 return 0;
3878
3879         domain = find_domain(dev);
3880         if (!domain)
3881                 return 0;
3882
3883         down_read(&dmar_global_lock);
3884         domain_remove_one_dev_info(domain, dev);
3885         if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
3886                 domain_exit(domain);
3887         up_read(&dmar_global_lock);
3888
3889         return 0;
3890 }
3891
3892 static struct notifier_block device_nb = {
3893         .notifier_call = device_notifier,
3894 };
3895
3896 static int intel_iommu_memory_notifier(struct notifier_block *nb,
3897                                        unsigned long val, void *v)
3898 {
3899         struct memory_notify *mhp = v;
3900         unsigned long long start, end;
3901         unsigned long start_vpfn, last_vpfn;
3902
3903         switch (val) {
3904         case MEM_GOING_ONLINE:
3905                 start = mhp->start_pfn << PAGE_SHIFT;
3906                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
3907                 if (iommu_domain_identity_map(si_domain, start, end)) {
3908                         pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
3909                                 start, end);
3910                         return NOTIFY_BAD;
3911                 }
3912                 break;
3913
3914         case MEM_OFFLINE:
3915         case MEM_CANCEL_ONLINE:
3916                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
3917                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
3918                 while (start_vpfn <= last_vpfn) {
3919                         struct iova *iova;
3920                         struct dmar_drhd_unit *drhd;
3921                         struct intel_iommu *iommu;
3922                         struct page *freelist;
3923
3924                         iova = find_iova(&si_domain->iovad, start_vpfn);
3925                         if (iova == NULL) {
3926                                 pr_debug("dmar: failed get IOVA for PFN %lx\n",
3927                                          start_vpfn);
3928                                 break;
3929                         }
3930
3931                         iova = split_and_remove_iova(&si_domain->iovad, iova,
3932                                                      start_vpfn, last_vpfn);
3933                         if (iova == NULL) {
3934                                 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
3935                                         start_vpfn, last_vpfn);
3936                                 return NOTIFY_BAD;
3937                         }
3938
3939                         freelist = domain_unmap(si_domain, iova->pfn_lo,
3940                                                iova->pfn_hi);
3941
3942                         rcu_read_lock();
3943                         for_each_active_iommu(iommu, drhd)
3944                                 iommu_flush_iotlb_psi(iommu, si_domain->id,
3945                                         iova->pfn_lo, iova_size(iova),
3946                                         !freelist, 0);
3947                         rcu_read_unlock();
3948                         dma_free_pagelist(freelist);
3949
3950                         start_vpfn = iova->pfn_hi + 1;
3951                         free_iova_mem(iova);
3952                 }
3953                 break;
3954         }
3955
3956         return NOTIFY_OK;
3957 }
3958
3959 static struct notifier_block intel_iommu_memory_nb = {
3960         .notifier_call = intel_iommu_memory_notifier,
3961         .priority = 0
3962 };
3963
3964
3965 static ssize_t intel_iommu_show_version(struct device *dev,
3966                                         struct device_attribute *attr,
3967                                         char *buf)
3968 {
3969         struct intel_iommu *iommu = dev_get_drvdata(dev);
3970         u32 ver = readl(iommu->reg + DMAR_VER_REG);
3971         return sprintf(buf, "%d:%d\n",
3972                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
3973 }
3974 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
3975
3976 static ssize_t intel_iommu_show_address(struct device *dev,
3977                                         struct device_attribute *attr,
3978                                         char *buf)
3979 {
3980         struct intel_iommu *iommu = dev_get_drvdata(dev);
3981         return sprintf(buf, "%llx\n", iommu->reg_phys);
3982 }
3983 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
3984
3985 static ssize_t intel_iommu_show_cap(struct device *dev,
3986                                     struct device_attribute *attr,
3987                                     char *buf)
3988 {
3989         struct intel_iommu *iommu = dev_get_drvdata(dev);
3990         return sprintf(buf, "%llx\n", iommu->cap);
3991 }
3992 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
3993
3994 static ssize_t intel_iommu_show_ecap(struct device *dev,
3995                                     struct device_attribute *attr,
3996                                     char *buf)
3997 {
3998         struct intel_iommu *iommu = dev_get_drvdata(dev);
3999         return sprintf(buf, "%llx\n", iommu->ecap);
4000 }
4001 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4002
4003 static struct attribute *intel_iommu_attrs[] = {
4004         &dev_attr_version.attr,
4005         &dev_attr_address.attr,
4006         &dev_attr_cap.attr,
4007         &dev_attr_ecap.attr,
4008         NULL,
4009 };
4010
4011 static struct attribute_group intel_iommu_group = {
4012         .name = "intel-iommu",
4013         .attrs = intel_iommu_attrs,
4014 };
4015
4016 const struct attribute_group *intel_iommu_groups[] = {
4017         &intel_iommu_group,
4018         NULL,
4019 };
4020
4021 int __init intel_iommu_init(void)
4022 {
4023         int ret = -ENODEV;
4024         struct dmar_drhd_unit *drhd;
4025         struct intel_iommu *iommu;
4026
4027         /* VT-d is required for a TXT/tboot launch, so enforce that */
4028         force_on = tboot_force_iommu();
4029
4030         if (iommu_init_mempool()) {
4031                 if (force_on)
4032                         panic("tboot: Failed to initialize iommu memory\n");
4033                 return -ENOMEM;
4034         }
4035
4036         down_write(&dmar_global_lock);
4037         if (dmar_table_init()) {
4038                 if (force_on)
4039                         panic("tboot: Failed to initialize DMAR table\n");
4040                 goto out_free_dmar;
4041         }
4042
4043         /*
4044          * Disable translation if already enabled prior to OS handover.
4045          */
4046         for_each_active_iommu(iommu, drhd)
4047                 if (iommu->gcmd & DMA_GCMD_TE)
4048                         iommu_disable_translation(iommu);
4049
4050         if (dmar_dev_scope_init() < 0) {
4051                 if (force_on)
4052                         panic("tboot: Failed to initialize DMAR device scope\n");
4053                 goto out_free_dmar;
4054         }
4055
4056         if (no_iommu || dmar_disabled)
4057                 goto out_free_dmar;
4058
4059         if (list_empty(&dmar_rmrr_units))
4060                 printk(KERN_INFO "DMAR: No RMRR found\n");
4061
4062         if (list_empty(&dmar_atsr_units))
4063                 printk(KERN_INFO "DMAR: No ATSR found\n");
4064
4065         if (dmar_init_reserved_ranges()) {
4066                 if (force_on)
4067                         panic("tboot: Failed to reserve iommu ranges\n");
4068                 goto out_free_reserved_range;
4069         }
4070
4071         init_no_remapping_devices();
4072
4073         ret = init_dmars();
4074         if (ret) {
4075                 if (force_on)
4076                         panic("tboot: Failed to initialize DMARs\n");
4077                 printk(KERN_ERR "IOMMU: dmar init failed\n");
4078                 goto out_free_reserved_range;
4079         }
4080         up_write(&dmar_global_lock);
4081         printk(KERN_INFO
4082         "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4083
4084         init_timer(&unmap_timer);
4085 #ifdef CONFIG_SWIOTLB
4086         swiotlb = 0;
4087 #endif
4088         dma_ops = &intel_dma_ops;
4089
4090         init_iommu_pm_ops();
4091
4092         for_each_active_iommu(iommu, drhd)
4093                 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4094                                                        intel_iommu_groups,
4095                                                        iommu->name);
4096
4097         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4098         bus_register_notifier(&pci_bus_type, &device_nb);
4099         if (si_domain && !hw_pass_through)
4100                 register_memory_notifier(&intel_iommu_memory_nb);
4101
4102         intel_iommu_enabled = 1;
4103
4104         return 0;
4105
4106 out_free_reserved_range:
4107         put_iova_domain(&reserved_iova_list);
4108 out_free_dmar:
4109         intel_iommu_free_dmars();
4110         up_write(&dmar_global_lock);
4111         iommu_exit_mempool();
4112         return ret;
4113 }
4114
4115 static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4116 {
4117         struct intel_iommu *iommu = opaque;
4118
4119         iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4120         return 0;
4121 }
4122
4123 /*
4124  * NB - intel-iommu lacks any sort of reference counting for the users of
4125  * dependent devices.  If multiple endpoints have intersecting dependent
4126  * devices, unbinding the driver from any one of them will possibly leave
4127  * the others unable to operate.
4128  */
4129 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4130                                            struct device *dev)
4131 {
4132         if (!iommu || !dev || !dev_is_pci(dev))
4133                 return;
4134
4135         pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
4136 }
4137
4138 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4139                                        struct device *dev)
4140 {
4141         struct device_domain_info *info, *tmp;
4142         struct intel_iommu *iommu;
4143         unsigned long flags;
4144         int found = 0;
4145         u8 bus, devfn;
4146
4147         iommu = device_to_iommu(dev, &bus, &devfn);
4148         if (!iommu)
4149                 return;
4150
4151         spin_lock_irqsave(&device_domain_lock, flags);
4152         list_for_each_entry_safe(info, tmp, &domain->devices, link) {
4153                 if (info->iommu == iommu && info->bus == bus &&
4154                     info->devfn == devfn) {
4155                         unlink_domain_info(info);
4156                         spin_unlock_irqrestore(&device_domain_lock, flags);
4157
4158                         iommu_disable_dev_iotlb(info);
4159                         iommu_detach_dev(iommu, info->bus, info->devfn);
4160                         iommu_detach_dependent_devices(iommu, dev);
4161                         free_devinfo_mem(info);
4162
4163                         spin_lock_irqsave(&device_domain_lock, flags);
4164
4165                         if (found)
4166                                 break;
4167                         else
4168                                 continue;
4169                 }
4170
4171                 /* if there is no other devices under the same iommu
4172                  * owned by this domain, clear this iommu in iommu_bmp
4173                  * update iommu count and coherency
4174                  */
4175                 if (info->iommu == iommu)
4176                         found = 1;
4177         }
4178
4179         spin_unlock_irqrestore(&device_domain_lock, flags);
4180
4181         if (found == 0) {
4182                 domain_detach_iommu(domain, iommu);
4183                 if (!domain_type_is_vm_or_si(domain))
4184                         iommu_detach_domain(domain, iommu);
4185         }
4186 }
4187
4188 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4189 {
4190         int adjust_width;
4191
4192         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
4193         domain_reserve_special_ranges(domain);
4194
4195         /* calculate AGAW */
4196         domain->gaw = guest_width;
4197         adjust_width = guestwidth_to_adjustwidth(guest_width);
4198         domain->agaw = width_to_agaw(adjust_width);
4199
4200         domain->iommu_coherency = 0;
4201         domain->iommu_snooping = 0;
4202         domain->iommu_superpage = 0;
4203         domain->max_addr = 0;
4204
4205         /* always allocate the top pgd */
4206         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4207         if (!domain->pgd)
4208                 return -ENOMEM;
4209         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4210         return 0;
4211 }
4212
4213 static int intel_iommu_domain_init(struct iommu_domain *domain)
4214 {
4215         struct dmar_domain *dmar_domain;
4216
4217         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4218         if (!dmar_domain) {
4219                 printk(KERN_ERR
4220                         "intel_iommu_domain_init: dmar_domain == NULL\n");
4221                 return -ENOMEM;
4222         }
4223         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4224                 printk(KERN_ERR
4225                         "intel_iommu_domain_init() failed\n");
4226                 domain_exit(dmar_domain);
4227                 return -ENOMEM;
4228         }
4229         domain_update_iommu_cap(dmar_domain);
4230         domain->priv = dmar_domain;
4231
4232         domain->geometry.aperture_start = 0;
4233         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4234         domain->geometry.force_aperture = true;
4235
4236         return 0;
4237 }
4238
4239 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
4240 {
4241         struct dmar_domain *dmar_domain = domain->priv;
4242
4243         domain->priv = NULL;
4244         domain_exit(dmar_domain);
4245 }
4246
4247 static int intel_iommu_attach_device(struct iommu_domain *domain,
4248                                      struct device *dev)
4249 {
4250         struct dmar_domain *dmar_domain = domain->priv;
4251         struct intel_iommu *iommu;
4252         int addr_width;
4253         u8 bus, devfn;
4254
4255         if (device_is_rmrr_locked(dev)) {
4256                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
4257                 return -EPERM;
4258         }
4259
4260         /* normally dev is not mapped */
4261         if (unlikely(domain_context_mapped(dev))) {
4262                 struct dmar_domain *old_domain;
4263
4264                 old_domain = find_domain(dev);
4265                 if (old_domain) {
4266                         if (domain_type_is_vm_or_si(dmar_domain))
4267                                 domain_remove_one_dev_info(old_domain, dev);
4268                         else
4269                                 domain_remove_dev_info(old_domain);
4270                 }
4271         }
4272
4273         iommu = device_to_iommu(dev, &bus, &devfn);
4274         if (!iommu)
4275                 return -ENODEV;
4276
4277         /* check if this iommu agaw is sufficient for max mapped address */
4278         addr_width = agaw_to_width(iommu->agaw);
4279         if (addr_width > cap_mgaw(iommu->cap))
4280                 addr_width = cap_mgaw(iommu->cap);
4281
4282         if (dmar_domain->max_addr > (1LL << addr_width)) {
4283                 printk(KERN_ERR "%s: iommu width (%d) is not "
4284                        "sufficient for the mapped address (%llx)\n",
4285                        __func__, addr_width, dmar_domain->max_addr);
4286                 return -EFAULT;
4287         }
4288         dmar_domain->gaw = addr_width;
4289
4290         /*
4291          * Knock out extra levels of page tables if necessary
4292          */
4293         while (iommu->agaw < dmar_domain->agaw) {
4294                 struct dma_pte *pte;
4295
4296                 pte = dmar_domain->pgd;
4297                 if (dma_pte_present(pte)) {
4298                         dmar_domain->pgd = (struct dma_pte *)
4299                                 phys_to_virt(dma_pte_addr(pte));
4300                         free_pgtable_page(pte);
4301                 }
4302                 dmar_domain->agaw--;
4303         }
4304
4305         return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
4306 }
4307
4308 static void intel_iommu_detach_device(struct iommu_domain *domain,
4309                                       struct device *dev)
4310 {
4311         struct dmar_domain *dmar_domain = domain->priv;
4312
4313         domain_remove_one_dev_info(dmar_domain, dev);
4314 }
4315
4316 static int intel_iommu_map(struct iommu_domain *domain,
4317                            unsigned long iova, phys_addr_t hpa,
4318                            size_t size, int iommu_prot)
4319 {
4320         struct dmar_domain *dmar_domain = domain->priv;
4321         u64 max_addr;
4322         int prot = 0;
4323         int ret;
4324
4325         if (iommu_prot & IOMMU_READ)
4326                 prot |= DMA_PTE_READ;
4327         if (iommu_prot & IOMMU_WRITE)
4328                 prot |= DMA_PTE_WRITE;
4329         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4330                 prot |= DMA_PTE_SNP;
4331
4332         max_addr = iova + size;
4333         if (dmar_domain->max_addr < max_addr) {
4334                 u64 end;
4335
4336                 /* check if minimum agaw is sufficient for mapped address */
4337                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4338                 if (end < max_addr) {
4339                         printk(KERN_ERR "%s: iommu width (%d) is not "
4340                                "sufficient for the mapped address (%llx)\n",
4341                                __func__, dmar_domain->gaw, max_addr);
4342                         return -EFAULT;
4343                 }
4344                 dmar_domain->max_addr = max_addr;
4345         }
4346         /* Round up size to next multiple of PAGE_SIZE, if it and
4347            the low bits of hpa would take us onto the next page */
4348         size = aligned_nrpages(hpa, size);
4349         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4350                                  hpa >> VTD_PAGE_SHIFT, size, prot);
4351         return ret;
4352 }
4353
4354 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4355                                 unsigned long iova, size_t size)
4356 {
4357         struct dmar_domain *dmar_domain = domain->priv;
4358         struct page *freelist = NULL;
4359         struct intel_iommu *iommu;
4360         unsigned long start_pfn, last_pfn;
4361         unsigned int npages;
4362         int iommu_id, num, ndomains, level = 0;
4363
4364         /* Cope with horrid API which requires us to unmap more than the
4365            size argument if it happens to be a large-page mapping. */
4366         if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4367                 BUG();
4368
4369         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4370                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4371
4372         start_pfn = iova >> VTD_PAGE_SHIFT;
4373         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4374
4375         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4376
4377         npages = last_pfn - start_pfn + 1;
4378
4379         for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4380                iommu = g_iommus[iommu_id];
4381
4382                /*
4383                 * find bit position of dmar_domain
4384                 */
4385                ndomains = cap_ndoms(iommu->cap);
4386                for_each_set_bit(num, iommu->domain_ids, ndomains) {
4387                        if (iommu->domains[num] == dmar_domain)
4388                                iommu_flush_iotlb_psi(iommu, num, start_pfn,
4389                                                      npages, !freelist, 0);
4390                }
4391
4392         }
4393
4394         dma_free_pagelist(freelist);
4395
4396         if (dmar_domain->max_addr == iova + size)
4397                 dmar_domain->max_addr = iova;
4398
4399         return size;
4400 }
4401
4402 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4403                                             dma_addr_t iova)
4404 {
4405         struct dmar_domain *dmar_domain = domain->priv;
4406         struct dma_pte *pte;
4407         int level = 0;
4408         u64 phys = 0;
4409
4410         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4411         if (pte)
4412                 phys = dma_pte_addr(pte);
4413
4414         return phys;
4415 }
4416
4417 static bool intel_iommu_capable(enum iommu_cap cap)
4418 {
4419         if (cap == IOMMU_CAP_CACHE_COHERENCY)
4420                 return domain_update_iommu_snooping(NULL) == 1;
4421         if (cap == IOMMU_CAP_INTR_REMAP)
4422                 return irq_remapping_enabled == 1;
4423
4424         return false;
4425 }
4426
4427 static int intel_iommu_add_device(struct device *dev)
4428 {
4429         struct intel_iommu *iommu;
4430         struct iommu_group *group;
4431         u8 bus, devfn;
4432
4433         iommu = device_to_iommu(dev, &bus, &devfn);
4434         if (!iommu)
4435                 return -ENODEV;
4436
4437         iommu_device_link(iommu->iommu_dev, dev);
4438
4439         group = iommu_group_get_for_dev(dev);
4440
4441         if (IS_ERR(group))
4442                 return PTR_ERR(group);
4443
4444         iommu_group_put(group);
4445         return 0;
4446 }
4447
4448 static void intel_iommu_remove_device(struct device *dev)
4449 {
4450         struct intel_iommu *iommu;
4451         u8 bus, devfn;
4452
4453         iommu = device_to_iommu(dev, &bus, &devfn);
4454         if (!iommu)
4455                 return;
4456
4457         iommu_group_remove_device(dev);
4458
4459         iommu_device_unlink(iommu->iommu_dev, dev);
4460 }
4461
4462 static const struct iommu_ops intel_iommu_ops = {
4463         .capable        = intel_iommu_capable,
4464         .domain_init    = intel_iommu_domain_init,
4465         .domain_destroy = intel_iommu_domain_destroy,
4466         .attach_dev     = intel_iommu_attach_device,
4467         .detach_dev     = intel_iommu_detach_device,
4468         .map            = intel_iommu_map,
4469         .unmap          = intel_iommu_unmap,
4470         .iova_to_phys   = intel_iommu_iova_to_phys,
4471         .add_device     = intel_iommu_add_device,
4472         .remove_device  = intel_iommu_remove_device,
4473         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
4474 };
4475
4476 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4477 {
4478         /* G4x/GM45 integrated gfx dmar support is totally busted. */
4479         printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4480         dmar_map_gfx = 0;
4481 }
4482
4483 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4484 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4485 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4486 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4487 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4488 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4489 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4490
4491 static void quirk_iommu_rwbf(struct pci_dev *dev)
4492 {
4493         /*
4494          * Mobile 4 Series Chipset neglects to set RWBF capability,
4495          * but needs it. Same seems to hold for the desktop versions.
4496          */
4497         printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4498         rwbf_quirk = 1;
4499 }
4500
4501 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4502 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4503 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4504 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4505 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4506 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4507 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4508
4509 #define GGC 0x52
4510 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
4511 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
4512 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
4513 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
4514 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
4515 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
4516 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
4517 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
4518
4519 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4520 {
4521         unsigned short ggc;
4522
4523         if (pci_read_config_word(dev, GGC, &ggc))
4524                 return;
4525
4526         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4527                 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4528                 dmar_map_gfx = 0;
4529         } else if (dmar_map_gfx) {
4530                 /* we have to ensure the gfx device is idle before we flush */
4531                 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4532                 intel_iommu_strict = 1;
4533        }
4534 }
4535 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4536 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4537 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4538 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4539
4540 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4541    ISOCH DMAR unit for the Azalia sound device, but not give it any
4542    TLB entries, which causes it to deadlock. Check for that.  We do
4543    this in a function called from init_dmars(), instead of in a PCI
4544    quirk, because we don't want to print the obnoxious "BIOS broken"
4545    message if VT-d is actually disabled.
4546 */
4547 static void __init check_tylersburg_isoch(void)
4548 {
4549         struct pci_dev *pdev;
4550         uint32_t vtisochctrl;
4551
4552         /* If there's no Azalia in the system anyway, forget it. */
4553         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4554         if (!pdev)
4555                 return;
4556         pci_dev_put(pdev);
4557
4558         /* System Management Registers. Might be hidden, in which case
4559            we can't do the sanity check. But that's OK, because the
4560            known-broken BIOSes _don't_ actually hide it, so far. */
4561         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4562         if (!pdev)
4563                 return;
4564
4565         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4566                 pci_dev_put(pdev);
4567                 return;
4568         }
4569
4570         pci_dev_put(pdev);
4571
4572         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4573         if (vtisochctrl & 1)
4574                 return;
4575
4576         /* Drop all bits other than the number of TLB entries */
4577         vtisochctrl &= 0x1c;
4578
4579         /* If we have the recommended number of TLB entries (16), fine. */
4580         if (vtisochctrl == 0x10)
4581                 return;
4582
4583         /* Zero TLB entries? You get to ride the short bus to school. */
4584         if (!vtisochctrl) {
4585                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4586                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4587                      dmi_get_system_info(DMI_BIOS_VENDOR),
4588                      dmi_get_system_info(DMI_BIOS_VERSION),
4589                      dmi_get_system_info(DMI_PRODUCT_VERSION));
4590                 iommu_identity_mapping |= IDENTMAP_AZALIA;
4591                 return;
4592         }
4593         
4594         printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4595                vtisochctrl);
4596 }