arch/microblaze/kernel/dma.c
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

#define NOT_COHERENT_CACHE

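/*
 * MicroBlaze caches are not DMA-coherent, so with NOT_COHERENT_CACHE
 * defined the coherent-allocation path below hands off to
 * consistent_alloc(), which returns an uncached mapping. The #else
 * branch is the plain page-allocator path kept for cache-coherent
 * configurations.
 */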
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
        return consistent_alloc(flag, size, dma_handle);
#else
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret);

        return ret;
#endif
}

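/*
 * Counterpart to the allocator above: the non-coherent path releases
 * the uncached region via consistent_free(); the coherent path simply
 * returns the pages to the page allocator.
 */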
static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
        consistent_free(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

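/*
 * Map a scatter-gather list for streaming DMA: each segment's DMA
 * address is simply its physical address (direct mapping, no IOMMU),
 * and the cache is synced for the given direction before the device
 * touches the buffer.
 */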
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
                                                        sg->length, direction);
        }

        return nents;
}

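/*
 * No addressing restrictions are enforced here; any DMA mask the
 * caller asks about is reported as supported.
 */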
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

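/*
 * Streaming mapping of a single page: sync the cache for the transfer
 * direction and return the physical address unchanged. A typical
 * (hypothetical) driver call such as
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * reaches this function through the dma_map_ops table at the bottom of
 * this file.
 */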
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction direction,
                                             unsigned long attrs)
{
        __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         unsigned long attrs)
{
        /*
         * No additional cache cleanup is necessary here: dma_address is
         * already a physical address, which is exactly what __dma_sync()
         * expects, so it is passed through without any virt/phys
         * conversion.
         */
        __dma_sync(dma_address, size, direction);
}

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
                               dma_addr_t dma_handle, size_t size,
                               enum dma_data_direction direction)
{
        /*
         * It's pointless to flush the cache as the memory segment
         * is given to the CPU
         */

        if (direction == DMA_FROM_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
                                  dma_addr_t dma_handle, size_t size,
                                  enum dma_data_direction direction)
{
        /*
         * It's pointless to invalidate the cache if the device isn't
         * supposed to write to the relevant region
         */

        if (direction == DMA_TO_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
                           struct scatterlist *sgl, int nents,
                           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        if (direction == DMA_FROM_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
                              struct scatterlist *sgl, int nents,
                              enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        if (direction == DMA_TO_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

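/*
 * Remap a coherent buffer into user space. On the non-coherent path
 * the user mapping is made uncached (pgprot_noncached) to match the
 * kernel-side consistent mapping; without an MMU there is nothing to
 * remap and -ENXIO is returned.
 */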
static int dma_direct_mmap_coherent(struct device *dev,
                             struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             unsigned long attrs)
{
#ifdef CONFIG_MMU
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;

        if (off >= count || user_count > (count - off))
                return -ENXIO;

#ifdef NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = consistent_virt_to_pfn(cpu_addr);
#else
        pfn = virt_to_pfn(cpu_addr);
#endif
        return remap_pfn_range(vma, vma->vm_start, pfn + off,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
        return -ENXIO;
#endif
}

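/*
 * The ops table wiring the callbacks above into the generic DMA API;
 * on MicroBlaze these direct-mapping operations serve as the default
 * for all devices.
 */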
struct dma_map_ops dma_direct_ops = {
        .alloc          = dma_direct_alloc_coherent,
        .free           = dma_direct_free_coherent,
        .mmap           = dma_direct_mmap_coherent,
        .map_sg         = dma_direct_map_sg,
        .dma_supported  = dma_direct_dma_supported,
        .map_page       = dma_direct_map_page,
        .unmap_page     = dma_direct_unmap_page,
        .sync_single_for_cpu            = dma_direct_sync_single_for_cpu,
        .sync_single_for_device         = dma_direct_sync_single_for_device,
        .sync_sg_for_cpu                = dma_direct_sync_sg_for_cpu,
        .sync_sg_for_device             = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

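/*
 * Early init: preallocate entries for DMA-API debug tracking.
 * dma_debug_init() is a no-op when CONFIG_DMA_API_DEBUG is disabled.
 */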
static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);