Merge branches 'iommu/page-sizes' and 'iommu/group-id' into next
author Joerg Roedel <joerg.roedel@amd.com>
Mon, 9 Jan 2012 12:06:28 +0000 (13:06 +0100)
committer Joerg Roedel <joerg.roedel@amd.com>
Mon, 9 Jan 2012 12:06:28 +0000 (13:06 +0100)
Conflicts:
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
include/linux/iommu.h

Documentation/kernel-parameters.txt
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
include/linux/iommu.h

Simple merge
@@@ -3197,7 -2802,7 +3217,8 @@@ static struct iommu_ops amd_iommu_ops 
        .unmap = amd_iommu_unmap,
        .iova_to_phys = amd_iommu_iova_to_phys,
        .domain_has_cap = amd_iommu_domain_has_cap,
+       .device_group = amd_iommu_device_group,
 +      .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
  };
  
  /*****************************************************************************
@@@ -4089,7 -4117,7 +4137,8 @@@ static struct iommu_ops intel_iommu_op
        .unmap          = intel_iommu_unmap,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .domain_has_cap = intel_iommu_domain_has_cap,
+       .device_group   = intel_iommu_device_group,
 +      .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
  };
  
  static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
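Both drivers now advertise their hardware page sizes through the new
pgsize_bitmap field. As a minimal sketch (not part of this merge; the
macro name is invented), a driver supporting 4KiB, 2MiB and 1GiB pages
could express that as:

/*
 * Illustration only: bit N set means a page of (1UL << N) bytes is
 * supported, so this encodes 4KiB, 2MiB and 1GiB pages.
 */
#define EXAMPLE_IOMMU_PGSIZES	((1UL << 12) | (1UL << 21) | (1UL << 30))

The generic code derives the minimum supported page size from this
bitmap with __ffs(), as the iommu_unmap() hunk below shows.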
@@@ -243,41 -230,19 +294,50 @@@ size_t iommu_unmap(struct iommu_domain 
        if (unlikely(domain->ops->unmap == NULL))
                return -ENODEV;
  
 -      size         = PAGE_SIZE << gfp_order;
 -
 -      BUG_ON(!IS_ALIGNED(iova, size));
 -
 -      return domain->ops->unmap(domain, iova, gfp_order);
 +      /* find out the minimum page size supported */
 +      min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 +
 +      /*
 +       * The virtual address, as well as the size of the mapping, must be
 +       * aligned (at least) to the size of the smallest page supported
 +       * by the hardware
 +       */
 +      if (!IS_ALIGNED(iova | size, min_pagesz)) {
 +              pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
 +                                      iova, (unsigned long)size, min_pagesz);
 +              return -EINVAL;
 +      }
 +
 +      pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
 +                                                      (unsigned long)size);
 +
 +      /*
 +       * Keep iterating until we either unmap 'size' bytes (or more)
 +       * or we hit an area that isn't mapped.
 +       */
 +      while (unmapped < size) {
 +              size_t left = size - unmapped;
 +
 +              unmapped_page = domain->ops->unmap(domain, iova, left);
 +              if (!unmapped_page)
 +                      break;
 +
 +              pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
 +                                      (unsigned long)unmapped_page);
 +
 +              iova += unmapped_page;
 +              unmapped += unmapped_page;
 +      }
 +
 +      return unmapped;
  }
  EXPORT_SYMBOL_GPL(iommu_unmap);
+ int iommu_device_group(struct device *dev, unsigned int *groupid)
+ {
+       if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
+               return dev->bus->iommu_ops->device_group(dev, groupid);
+       return -ENODEV;
+ }
+ EXPORT_SYMBOL_GPL(iommu_device_group);
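A hedged caller-side sketch of the two interfaces reworked or added in
this hunk: iommu_unmap() now takes and returns byte sizes instead of a
gfp_order, and iommu_device_group() reports a device's isolation group.
The domain, iova and dev variables are assumed to exist in the caller:

	size_t size = 2UL << 20;	/* 2 MiB, for the example */
	size_t unmapped;
	unsigned int groupid;

	/* the core loops over whatever page sizes the mapping used */
	unmapped = iommu_unmap(domain, iova, size);
	if (unmapped != size)
		pr_warn("unmapped only 0x%zx of 0x%zx bytes\n", unmapped, size);

	/* ask which isolation group the device belongs to, if any */
	if (iommu_device_group(dev, &groupid) == 0)
		pr_info("%s is in iommu group %u\n", dev_name(dev), groupid);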
@@@ -74,7 -61,7 +74,8 @@@ struct iommu_ops 
                                    unsigned long iova);
        int (*domain_has_cap)(struct iommu_domain *domain,
                              unsigned long cap);
+       int (*device_group)(struct device *dev, unsigned int *groupid);
 +      unsigned long pgsize_bitmap;
  };
  
  extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
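A minimal driver-side sketch of how the extended struct iommu_ops might
be wired up, mirroring the amd_iommu_ops/intel_iommu_ops hunks above;
every my_* identifier here is invented for illustration:

static int my_device_group(struct device *dev, unsigned int *groupid)
{
	/* e.g. derive the group from the bus topology */
	*groupid = 0;
	return 0;
}

static struct iommu_ops my_iommu_ops = {
	/* .domain_init, .attach_dev, .map, .unmap, ... omitted */
	.device_group	= my_device_group,
	.pgsize_bitmap	= (1UL << 12) | (1UL << 21),	/* 4KiB and 2MiB */
};

static int __init my_iommu_init(void)
{
	return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
}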