[media] vb2-dma-sg: move dma_(un)map_sg here
drivers/media/v4l2-core/videobuf2-dma-sg.c
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)                                     \
        do {                                                            \
                if (debug >= level)                                     \
                        printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);  \
        } while (0)

struct vb2_dma_sg_conf {
        struct device           *dev;
};

struct vb2_dma_sg_buf {
        struct device                   *dev;
        void                            *vaddr;
        struct page                     **pages;
        int                             offset;
        enum dma_data_direction         dma_dir;
        struct sg_table                 sg_table;
        size_t                          size;
        unsigned int                    num_pages;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
        struct vm_area_struct           *vma;
};

static void vb2_dma_sg_put(void *buf_priv);

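/*
 * Allocate the buffer as individual pages, but try higher-order blocks
 * first so the resulting scatterlist stays short: start with the largest
 * order that still fits the remaining size, fall back to smaller orders
 * when allocation fails, and split whatever was obtained into single
 * pages for the pages[] array.
 */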
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
                gfp_t gfp_flags)
{
        unsigned int last_page = 0;
        int size = buf->size;

        while (size > 0) {
                struct page *pages;
                int order;
                int i;

                order = get_order(size);
                /* Don't over-allocate */
                if ((PAGE_SIZE << order) > size)
                        order--;

                pages = NULL;
                while (!pages) {
                        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                        __GFP_NOWARN | gfp_flags, order);
                        if (pages)
                                break;

                        if (order == 0) {
                                while (last_page--)
                                        __free_page(buf->pages[last_page]);
                                return -ENOMEM;
                        }
                        order--;
                }

                split_page(pages, order);
                for (i = 0; i < (1 << order); i++)
                        buf->pages[last_page++] = &pages[i];

                size -= PAGE_SIZE << order;
        }

        return 0;
}

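/*
 * MMAP buffer allocation: allocate the backing pages, wrap them in an
 * sg_table and map that table for DMA up front, so the dma_map_sg() /
 * dma_unmap_sg() calls live in this allocator rather than in each driver.
 */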
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
                              enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_dma_sg_conf *conf = alloc_ctx;
        struct vb2_dma_sg_buf *buf;
        struct sg_table *sgt;
        int ret;
        int num_pages;

        if (WARN_ON(alloc_ctx == NULL))
                return NULL;
        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->dma_dir = dma_dir;
        buf->offset = 0;
        buf->size = size;
        /* size is already page aligned */
        buf->num_pages = size >> PAGE_SHIFT;

        buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto fail_pages_array_alloc;

        ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
        if (ret)
                goto fail_pages_alloc;

        ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
                        buf->num_pages, 0, size, GFP_KERNEL);
        if (ret)
                goto fail_table_alloc;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(conf->dev);

        sgt = &buf->sg_table;
        if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
                goto fail_map;
        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        dprintk(1, "%s: Allocated buffer of %d pages\n",
                __func__, buf->num_pages);
        return buf;

fail_map:
        put_device(buf->dev);
        sg_free_table(sgt);
fail_table_alloc:
        num_pages = buf->num_pages;
        while (num_pages--)
                __free_page(buf->pages[num_pages]);
fail_pages_alloc:
        kfree(buf->pages);
fail_pages_array_alloc:
        kfree(buf);
        return NULL;
}

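/*
 * Release an MMAP buffer once the last reference is gone: unmap it from
 * the device, drop any kernel mapping, free the sg_table and the pages,
 * and release the device reference taken at allocation time.
 */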
static void vb2_dma_sg_put(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        if (atomic_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                        buf->num_pages);
                dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->num_pages);
                sg_free_table(&buf->sg_table);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kfree(buf->pages);
                put_device(buf->dev);
                kfree(buf);
        }
}

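/*
 * Cache synchronization around hardware access: prepare hands the buffer
 * to the device, finish hands it back to the CPU.
 */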
static void vb2_dma_sg_prepare(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

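/*
 * VM_IO / VM_PFNMAP areas are not backed by refcounted pages, so their
 * pages are looked up via follow_pfn() and never put_page()'d on release.
 */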
static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

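/*
 * USERPTR buffer setup: pin (or look up) the pages backing the userspace
 * range, build an sg_table over them and map it for DMA, mirroring what
 * the MMAP allocation path does.
 */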
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                    unsigned long size,
                                    enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_conf *conf = alloc_ctx;
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
        int num_pages_from_user;
        struct vm_area_struct *vma;
        struct sg_table *sgt;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->dev = conf->dev;
        buf->dma_dir = dma_dir;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->size = size;

        first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
        last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        buf->num_pages = last - first + 1;

        buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto userptr_fail_alloc_pages;

        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                dprintk(1, "no vma for address %lu\n", vaddr);
                goto userptr_fail_find_vma;
        }

        if (vma->vm_end < vaddr + size) {
                dprintk(1, "vma at %lu is too small for %lu bytes\n",
                        vaddr, size);
                goto userptr_fail_find_vma;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                dprintk(1, "failed to copy vma\n");
                goto userptr_fail_find_vma;
        }

        if (vma_is_io(buf->vma)) {
                for (num_pages_from_user = 0;
                     num_pages_from_user < buf->num_pages;
                     ++num_pages_from_user, vaddr += PAGE_SIZE) {
                        unsigned long pfn;

                        if (follow_pfn(vma, vaddr, &pfn)) {
                                dprintk(1, "no page for address %lu\n", vaddr);
                                break;
                        }
                        buf->pages[num_pages_from_user] = pfn_to_page(pfn);
                }
        } else
                num_pages_from_user = get_user_pages(current, current->mm,
                                             vaddr & PAGE_MASK,
                                             buf->num_pages,
                                             buf->dma_dir == DMA_FROM_DEVICE,
                                             1, /* force */
                                             buf->pages,
                                             NULL);

        if (num_pages_from_user != buf->num_pages)
                goto userptr_fail_get_user_pages;

        if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
                        buf->num_pages, buf->offset, size, GFP_KERNEL))
                goto userptr_fail_alloc_table_from_pages;

        sgt = &buf->sg_table;
        if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
                goto userptr_fail_map;
        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        return buf;

userptr_fail_map:
        sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
        dprintk(1, "get_user_pages requested/got: %d/%d\n",
                buf->num_pages, num_pages_from_user);
        if (!vma_is_io(buf->vma))
                while (--num_pages_from_user >= 0)
                        put_page(buf->pages[num_pages_from_user]);
        vb2_put_vma(buf->vma);
userptr_fail_find_vma:
        kfree(buf->pages);
userptr_fail_alloc_pages:
        kfree(buf);
        return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *               be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
               __func__, buf->num_pages);
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->num_pages);
        sg_free_table(&buf->sg_table);
        while (--i >= 0) {
                if (buf->dma_dir == DMA_FROM_DEVICE)
                        set_page_dirty_lock(buf->pages[i]);
                if (!vma_is_io(buf->vma))
                        put_page(buf->pages[i]);
        }
        kfree(buf->pages);
        vb2_put_vma(buf->vma);
        kfree(buf);
}

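/*
 * Return a kernel virtual address for the buffer, creating a vmap of the
 * pages on first use; the stored offset keeps unaligned USERPTR buffers
 * pointing at the right byte.
 */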
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        BUG_ON(!buf);

        if (!buf->vaddr)
                buf->vaddr = vm_map_ram(buf->pages,
                                        buf->num_pages,
                                        -1,
                                        PAGE_KERNEL);

        /* add offset in case userptr is not page-aligned */
        return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

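/*
 * Map the buffer into a userspace VMA page by page and hook up the common
 * vm_operations so the mapping keeps the buffer refcounted.
 */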
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int i = 0;

        if (!buf) {
                printk(KERN_ERR "No memory to map\n");
                return -EINVAL;
        }

        do {
                int ret;

                ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
                if (ret) {
                        printk(KERN_ERR "Remapping memory, error: %d\n", ret);
                        return ret;
                }

                uaddr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

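/* The cookie exposed to drivers is the sg_table holding the DMA mapping. */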
static void *vb2_dma_sg_cookie(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return &buf->sg_table;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .prepare        = vb2_dma_sg_prepare,
        .finish         = vb2_dma_sg_finish,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

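/*
 * The allocator context simply records which device the buffers will be
 * mapped for; drivers typically create one per device and hand it back to
 * vb2 from their queue_setup() callback.
 */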
void *vb2_dma_sg_init_ctx(struct device *dev)
{
        struct vb2_dma_sg_conf *conf;

        conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
        if (!IS_ERR_OR_NULL(alloc_ctx))
                kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);

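/*
 * Typical driver usage (a sketch only; "my_dev" is an illustrative name,
 * not something defined in this file):
 *
 *      my_dev->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
 *      my_dev->queue.mem_ops = &vb2_dma_sg_memops;
 *      (return the context via alloc_ctxs[] in the driver's queue_setup,
 *       and fetch the mapped sg_table with vb2_dma_sg_plane_desc() when
 *       programming the hardware)
 *      ...
 *      vb2_dma_sg_cleanup_ctx(my_dev->alloc_ctx);
 */
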
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");