Merge branch 'x86/vt-d' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu...
drivers/iommu/intel-iommu.c
index 02cd26a..ea3b13a 100644
@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root)
 }
 static inline void set_root_value(struct root_entry *root, unsigned long value)
 {
+       root->val &= ~VTD_PAGE_MASK;
        root->val |= value & VTD_PAGE_MASK;
 }
 
@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context,
 static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
 {
+       context->lo &= ~VTD_PAGE_MASK;
        context->lo |= value & VTD_PAGE_MASK;
 }
 
@@ -328,17 +330,10 @@ static int hw_pass_through = 1;
 /* si_domain contains multiple devices */
 #define DOMAIN_FLAG_STATIC_IDENTITY    (1 << 1)
 
-/* define the limit of IOMMUs supported in each domain */
-#ifdef CONFIG_X86
-# define       IOMMU_UNITS_SUPPORTED   MAX_IO_APICS
-#else
-# define       IOMMU_UNITS_SUPPORTED   64
-#endif
-
 struct dmar_domain {
        int     id;                     /* domain id */
        int     nid;                    /* node id */
-       DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
+       DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
                                        /* bitmap of iommus this domain uses*/
 
        struct list_head devices;       /* all devices' list */
@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
        unsigned long flags;
 
        root = (struct root_entry *)alloc_pgtable_page(iommu->node);
-       if (!root)
+       if (!root) {
+               pr_err("IOMMU: allocating root entry for %s failed\n",
+                       iommu->name);
                return -ENOMEM;
+       }
 
        __iommu_flush_cache(iommu, root, ROOT_SIZE);
 
@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
        return 0;
 }
 
-static void free_dmar_iommu(struct intel_iommu *iommu)
+static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
        struct dmar_domain *domain;
        int i;
@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 
        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);
+}
 
-       kfree(iommu->domains);
-       kfree(iommu->domain_ids);
-       iommu->domains = NULL;
-       iommu->domain_ids = NULL;
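+/*
+ * Free the per-IOMMU domain bookkeeping; disabling the hardware itself is
+ * left to disable_dmar_iommu().
+ */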
+static void free_dmar_iommu(struct intel_iommu *iommu)
+{
+       if ((iommu->domains) && (iommu->domain_ids)) {
+               kfree(iommu->domains);
+               kfree(iommu->domain_ids);
+               iommu->domains = NULL;
+               iommu->domain_ids = NULL;
+       }
 
        g_iommus[iommu->seq_id] = NULL;
 
@@ -2708,6 +2711,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
        return 0;
 }
 
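+/*
+ * Set up the invalidation interface for @iommu: prefer queued invalidation,
+ * falling back to register-based invalidation if it cannot be enabled.
+ */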
+static void intel_iommu_init_qi(struct intel_iommu *iommu)
+{
+       /*
+        * Start from a sane IOMMU hardware state. If queued invalidation
+        * was already initialized by us (for example, while enabling
+        * interrupt remapping), things are already rolling from a sane
+        * state.
+        */
+       if (!iommu->qi) {
+               /*
+                * Clear any previous faults.
+                */
+               dmar_fault(-1, iommu);
+               /*
+                * Disable queued invalidation if supported and already enabled
+                * before OS handover.
+                */
+               dmar_disable_qi(iommu);
+       }
+
+       if (dmar_enable_qi(iommu)) {
+               /*
+                * Queued invalidation could not be enabled; fall back to
+                * register-based invalidation.
+                */
+               iommu->flush.flush_context = __iommu_flush_context;
+               iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+               pr_info("IOMMU: %s using Register based invalidation\n",
+                       iommu->name);
+       } else {
+               iommu->flush.flush_context = qi_flush_context;
+               iommu->flush.flush_iotlb = qi_flush_iotlb;
+               pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+       }
+}
+
 static int __init init_dmars(void)
 {
        struct dmar_drhd_unit *drhd;
@@ -2728,14 +2766,18 @@ static int __init init_dmars(void)
                 * threaded kernel __init code path; all other accesses are
                 * read only
                 */
-               if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
+               if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
                        g_num_of_iommus++;
                        continue;
                }
                printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
-                         IOMMU_UNITS_SUPPORTED);
+                         DMAR_UNITS_SUPPORTED);
        }
 
+       /* Preallocate enough resources for IOMMU hot-addition */
+       if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
+               g_num_of_iommus = DMAR_UNITS_SUPPORTED;
+
        g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
                        GFP_KERNEL);
        if (!g_iommus) {
@@ -2764,58 +2806,14 @@ static int __init init_dmars(void)
                 * among all IOMMUs. Need to split it later.
                 */
                ret = iommu_alloc_root_entry(iommu);
-               if (ret) {
-                       printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+               if (ret)
                        goto free_iommu;
-               }
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
        }
 
-       /*
-        * Start from the sane iommu hardware state.
-        */
-       for_each_active_iommu(iommu, drhd) {
-               /*
-                * If the queued invalidation is already initialized by us
-                * (for example, while enabling interrupt-remapping) then
-                * we got the things already rolling from a sane state.
-                */
-               if (iommu->qi)
-                       continue;
-
-               /*
-                * Clear any previous faults.
-                */
-               dmar_fault(-1, iommu);
-               /*
-                * Disable queued invalidation if supported and already enabled
-                * before OS handover.
-                */
-               dmar_disable_qi(iommu);
-       }
-
-       for_each_active_iommu(iommu, drhd) {
-               if (dmar_enable_qi(iommu)) {
-                       /*
-                        * Queued Invalidate not enabled, use Register Based
-                        * Invalidate
-                        */
-                       iommu->flush.flush_context = __iommu_flush_context;
-                       iommu->flush.flush_iotlb = __iommu_flush_iotlb;
-                       printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
-                              "invalidation\n",
-                               iommu->seq_id,
-                              (unsigned long long)drhd->reg_base_addr);
-               } else {
-                       iommu->flush.flush_context = qi_flush_context;
-                       iommu->flush.flush_iotlb = qi_flush_iotlb;
-                       printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
-                              "invalidation\n",
-                               iommu->seq_id,
-                              (unsigned long long)drhd->reg_base_addr);
-               }
-       }
+       for_each_active_iommu(iommu, drhd)
+               intel_iommu_init_qi(iommu);
 
        if (iommu_pass_through)
                iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2901,8 +2899,10 @@ static int __init init_dmars(void)
        return 0;
 
 free_iommu:
-       for_each_active_iommu(iommu, drhd)
+       for_each_active_iommu(iommu, drhd) {
+               disable_dmar_iommu(iommu);
                free_dmar_iommu(iommu);
+       }
        kfree(deferred_flush);
 free_g_iommus:
        kfree(g_iommus);
@@ -3682,7 +3682,7 @@ static inline void init_iommu_pm_ops(void) {}
 #endif /* CONFIG_PM */
 
 
-int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;
@@ -3708,17 +3708,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
        return 0;
 }
 
-int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
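+/*
+ * Look up an already-registered ATSR unit that matches @atsr exactly
+ * (same segment, length and content).
+ */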
+static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
+{
+       struct dmar_atsr_unit *atsru;
+       struct acpi_dmar_atsr *tmp;
+
+       list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+               tmp = (struct acpi_dmar_atsr *)atsru->hdr;
+               if (atsr->segment != tmp->segment)
+                       continue;
+               if (atsr->header.length != tmp->header.length)
+                       continue;
+               if (memcmp(atsr, tmp, atsr->header.length) == 0)
+                       return atsru;
+       }
+
+       return NULL;
+}
+
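+/* Register an ATSR unit; an already-registered, identical ATSR is ignored. */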
+int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
 {
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;
 
+       if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
+               return 0;
+
        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
-       atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+       atsru = dmar_find_atsr(atsr);
+       if (atsru)
+               return 0;
+
+       atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
        if (!atsru)
                return -ENOMEM;
 
-       atsru->hdr = hdr;
+       /*
+        * If memory is allocated from slab by ACPI _DSM method, we need to
+        * copy the memory content because the memory buffer will be freed
+        * on return.
+        */
+       atsru->hdr = (void *)(atsru + 1);
+       memcpy(atsru->hdr, hdr, hdr->length);
        atsru->include_all = atsr->flags & 0x1;
        if (!atsru->include_all) {
                atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
@@ -3741,6 +3772,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
        kfree(atsru);
 }
 
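+/* Unregister and free the ATSR unit matching @hdr, if one exists. */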
+int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+       struct acpi_dmar_atsr *atsr;
+       struct dmar_atsr_unit *atsru;
+
+       atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+       atsru = dmar_find_atsr(atsr);
+       if (atsru) {
+               list_del_rcu(&atsru->list);
+               synchronize_rcu();
+               intel_iommu_free_atsr(atsru);
+       }
+
+       return 0;
+}
+
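+/*
+ * Check whether the ATSR matching @hdr can be removed: fail with -EBUSY if
+ * any device in its scope is still active.
+ */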
+int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+       int i;
+       struct device *dev;
+       struct acpi_dmar_atsr *atsr;
+       struct dmar_atsr_unit *atsru;
+
+       atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+       atsru = dmar_find_atsr(atsr);
+       if (!atsru)
+               return 0;
+
+       if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
+               for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+                                         i, dev)
+                       return -EBUSY;
+
+       return 0;
+}
+
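+/*
+ * Bring a hot-added DMAR unit online: verify that it supports the features
+ * the running configuration relies on (pass-through, snooping, superpages),
+ * set up its domains, root entry and invalidation interface, and, unless
+ * the unit is marked ignored, enable translation.
+ */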
+static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+{
+       int sp, ret = 0;
+       struct intel_iommu *iommu = dmaru->iommu;
+
+       if (g_iommus[iommu->seq_id])
+               return 0;
+
+       if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
+               pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+                       iommu->name);
+               return -ENXIO;
+       }
+       if (!ecap_sc_support(iommu->ecap) &&
+           domain_update_iommu_snooping(iommu)) {
+               pr_warn("IOMMU: %s doesn't support snooping.\n",
+                       iommu->name);
+               return -ENXIO;
+       }
+       sp = domain_update_iommu_superpage(iommu) - 1;
+       if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
+               pr_warn("IOMMU: %s doesn't support large page.\n",
+                       iommu->name);
+               return -ENXIO;
+       }
+
+       /*
+        * Disable translation if already enabled prior to OS handover.
+        */
+       if (iommu->gcmd & DMA_GCMD_TE)
+               iommu_disable_translation(iommu);
+
+       g_iommus[iommu->seq_id] = iommu;
+       ret = iommu_init_domains(iommu);
+       if (ret == 0)
+               ret = iommu_alloc_root_entry(iommu);
+       if (ret)
+               goto out;
+
+       if (dmaru->ignored) {
+               /*
+                * we always have to disable PMRs or DMA may fail on this device
+                */
+               if (force_on)
+                       iommu_disable_protect_mem_regions(iommu);
+               return 0;
+       }
+
+       intel_iommu_init_qi(iommu);
+       iommu_flush_write_buffer(iommu);
+       ret = dmar_set_interrupt(iommu);
+       if (ret)
+               goto disable_iommu;
+
+       iommu_set_root_entry(iommu);
+       iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+       iommu_enable_translation(iommu);
+
+       if (si_domain) {
+               ret = iommu_attach_domain(si_domain, iommu);
+               if (ret < 0 || si_domain->id != ret)
+                       goto disable_iommu;
+               domain_attach_iommu(si_domain, iommu);
+       }
+
+       iommu_disable_protect_mem_regions(iommu);
+       return 0;
+
+disable_iommu:
+       disable_dmar_iommu(iommu);
+out:
+       free_dmar_iommu(iommu);
+       return ret;
+}
+
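+/*
+ * Handle hot-add (@insert == true) or hot-removal of a DMAR unit. This is a
+ * no-op until the IOMMU driver itself has been enabled.
+ */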
+int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+       int ret = 0;
+       struct intel_iommu *iommu = dmaru->iommu;
+
+       if (!intel_iommu_enabled)
+               return 0;
+       if (iommu == NULL)
+               return -EINVAL;
+
+       if (insert) {
+               ret = intel_iommu_add(dmaru);
+       } else {
+               disable_dmar_iommu(iommu);
+               free_dmar_iommu(iommu);
+       }
+
+       return ret;
+}
+
 static void intel_iommu_free_dmars(void)
 {
        struct dmar_rmrr_unit *rmrru, *rmrr_n;