Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
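This blobdiff folds together a few independent cleanups to the arm64 DMA
mapping code: the tree-wide conversion of DMA attributes from a
struct dma_attrs * to a plain unsigned long bitmask (so
dma_get_attr(ATTR, attrs) becomes attrs & ATTR, and "no attributes" is
spelled 0 rather than NULL), the replacement of
__dma_flush_range(start, end) with __dma_flush_area(start, size), and
marking the boot-time-only swiotlb flag __ro_after_init instead of
__read_mostly.

As a minimal caller-side sketch of the new attribute convention (the
helper and its names below are hypothetical, for illustration only;
dma_alloc_attrs() takes the bitmask directly after this conversion):

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate a write-combined coherent buffer. */
static void *alloc_wc_buf(struct device *dev, size_t size,
			  dma_addr_t *handle)
{
	/*
	 * Attributes are ORed into a plain bitmask now; there is no
	 * DEFINE_DMA_ATTRS()/dma_set_attr() boilerplate any more.
	 */
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
}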
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index f6c55af..bdacead 100644
@@ -20,6 +20,7 @@
 #include <linux/gfp.h>
 #include <linux/acpi.h>
 #include <linux/bootmem.h>
+#include <linux/cache.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -30,12 +31,12 @@
 
 #include <asm/cacheflush.h>
 
-static int swiotlb __read_mostly;
+static int swiotlb __ro_after_init;
 
-static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
                                 bool coherent)
 {
-       if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+       if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
 }
@@ -91,7 +92,7 @@ static int __free_from_pool(void *start, size_t size)
 
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
@@ -121,7 +122,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 
 static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);
@@ -140,7 +141,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
 
 static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
        struct page *page;
        void *ptr, *coherent_ptr;
@@ -168,7 +169,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
                return ptr;
 
        /* remove any dirty cache lines on the kernel alias */
-       __dma_flush_range(ptr, ptr + size);
+       __dma_flush_area(ptr, size);
 
        /* create a coherent mapping */
        page = virt_to_page(ptr);
@@ -188,7 +189,7 @@ no_mem:
 
 static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
-                      struct dma_attrs *attrs)
+                      unsigned long attrs)
 {
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
@@ -205,7 +206,7 @@ static void __dma_free(struct device *dev, size_t size,
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        dma_addr_t dev_addr;
 
@@ -219,7 +220,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
 
 static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
@@ -228,7 +229,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
 
 static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
-                                 struct dma_attrs *attrs)
+                                 unsigned long attrs)
 {
        struct scatterlist *sg;
        int i, ret;
@@ -245,7 +246,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
-                                    struct dma_attrs *attrs)
+                                    unsigned long attrs)
 {
        struct scatterlist *sg;
        int i;
@@ -306,7 +307,7 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
 static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -333,7 +334,7 @@ static int __swiotlb_mmap(struct device *dev,
 
 static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 
@@ -387,7 +388,7 @@ static int __init atomic_pool_init(void)
                void *page_addr = page_address(page);
 
                memset(page_addr, 0, atomic_pool_size);
-               __dma_flush_range(page_addr, page_addr + atomic_pool_size);
+               __dma_flush_area(page_addr, atomic_pool_size);
 
                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
@@ -435,21 +436,21 @@ out:
 
 static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
-                          struct dma_attrs *attrs)
+                          unsigned long attrs)
 {
        return NULL;
 }
 
 static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
-                        struct dma_attrs *attrs)
+                        unsigned long attrs)
 {
 }
 
 static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                       struct dma_attrs *attrs)
+                       unsigned long attrs)
 {
        return -ENXIO;
 }
@@ -457,20 +458,20 @@ static int __dummy_mmap(struct device *dev,
 static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        return DMA_ERROR_CODE;
 }
 
 static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
 }
 
 static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
-                         struct dma_attrs *attrs)
+                         unsigned long attrs)
 {
        return 0;
 }
@@ -478,7 +479,7 @@ static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
 static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
-                            struct dma_attrs *attrs)
+                            unsigned long attrs)
 {
 }
 
@@ -548,12 +549,12 @@ fs_initcall(dma_debug_do_init);
 /* Thankfully, all cache ops are by VA so we can ignore phys here */
 static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
 {
-       __dma_flush_range(virt, virt + PAGE_SIZE);
+       __dma_flush_area(virt, PAGE_SIZE);
 }
 
 static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
-                                struct dma_attrs *attrs)
+                                unsigned long attrs)
 {
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
@@ -613,7 +614,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 }
 
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-                              dma_addr_t handle, struct dma_attrs *attrs)
+                              dma_addr_t handle, unsigned long attrs)
 {
        size_t iosize = size;
 
@@ -629,7 +630,7 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
-               iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+               iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_from_pool(cpu_addr, size);
        } else if (is_vmalloc_addr(cpu_addr)){
                struct vm_struct *area = find_vm_area(cpu_addr);
@@ -639,14 +640,14 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
-               iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+               iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
 }
 
 static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
-                             struct dma_attrs *attrs)
+                             unsigned long attrs)
 {
        struct vm_struct *area;
        int ret;
@@ -666,7 +667,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 
 static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
-                              size_t size, struct dma_attrs *attrs)
+                              size_t size, unsigned long attrs)
 {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);
@@ -707,14 +708,14 @@ static void __iommu_sync_single_for_device(struct device *dev,
 static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
        bool coherent = is_device_dma_coherent(dev);
        int prot = dma_direction_to_prot(dir, coherent);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
        if (!iommu_dma_mapping_error(dev, dev_addr) &&
-           !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_device(dev, dev_addr, size, dir);
 
        return dev_addr;
@@ -722,9 +723,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
 
 static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
-                              struct dma_attrs *attrs)
+                              unsigned long attrs)
 {
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
 
        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
@@ -760,11 +761,11 @@ static void __iommu_sync_sg_for_device(struct device *dev,
 
 static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
-                               struct dma_attrs *attrs)
+                               unsigned long attrs)
 {
        bool coherent = is_device_dma_coherent(dev);
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);
 
        return iommu_dma_map_sg(dev, sgl, nelems,
@@ -774,9 +775,9 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
-                                  struct dma_attrs *attrs)
+                                  unsigned long attrs)
 {
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
 
        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
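
The DMA_ATTR_SKIP_CPU_SYNC handling above has the same shape on the
caller side. A minimal sketch, assuming a driver that performs its own
cache maintenance (dev, buf and len are hypothetical; the *_attrs
mapping helpers accept the bitmask directly):

#include <linux/dma-mapping.h>

/*
 * Hypothetical example: the driver syncs caches itself, so it skips
 * the implicit maintenance on both map and unmap.
 */
static int send_buf(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... start the transfer and wait for it to complete ... */

	dma_unmap_single_attrs(dev, addr, len, DMA_TO_DEVICE,
			       DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}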