2 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
4 * Copyright (C) 2010 Samsung Electronics
6 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
13 #include <linux/module.h>
15 #include <linux/scatterlist.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
20 #include <media/videobuf2-core.h>
21 #include <media/videobuf2-memops.h>
22 #include <media/videobuf2-dma-sg.h>
25 module_param(debug, int, 0644);

/*
 * Debug logging helper: emits a KERN_DEBUG message tagged "vb2-dma-sg: ".
 * NOTE(review): the level check against the "debug" module parameter is on
 * lines elided from this view — presumably it gates the printk; confirm.
 */
27 #define dprintk(level, fmt, arg...) \
30 printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
/*
 * Per-allocator-context configuration. Members are elided here;
 * vb2_dma_sg_alloc() reads conf->dev, so it at least holds the
 * struct device used for DMA mapping.
 */
33 struct vb2_dma_sg_conf {
/*
 * Per-buffer state for one scatter/gather vb2 buffer.
 * NOTE(review): several members referenced elsewhere in this file
 * (dev, vaddr, pages, offset, size, refcount) are declared on lines
 * not visible in this chunk.
 */
37 struct vb2_dma_sg_buf {
	/* direction passed to dma_map_sg()/dma_sync_sg_*() */
42 	enum dma_data_direction dma_dir;
	/* scatter/gather table describing the pages backing the buffer */
43 	struct sg_table sg_table;
	/* number of pages in the pages[] array */
45 	unsigned int num_pages;
	/* refcount handler hooked into the mmap vm_ops (see vb2_dma_sg_mmap) */
47 	struct vb2_vmarea_handler handler;
	/* copy of the userspace VMA for USERPTR buffers (vb2_get_vma) */
48 	struct vm_area_struct *vma;
/* Forward declaration: vb2_dma_sg_alloc() installs this as handler.put. */
51 static void vb2_dma_sg_put(void *buf_priv);
/*
 * Fill buf->pages[] with physical pages, preferring large (high-order)
 * allocations and splitting them into individual page pointers so the
 * rest of the code can treat the buffer uniformly page-by-page.
 * Returns 0 on success; the failure path (elided here) appears to free
 * the pages collected so far via __free_page().
 */
53 static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
	/* next free slot in buf->pages[] */
56 	unsigned int last_page = 0;
	/* smallest order that covers the remaining size */
64 		order = get_order(size);
		/* Don't over-allocate */
65 		/* Dont over allocate*/
66 		if ((PAGE_SIZE << order) > size)
		/* __GFP_NOWARN: a failed high-order attempt is expected; retry smaller */
71 		pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
72 				__GFP_NOWARN | gfp_flags, order);
		/* error unwind: release everything gathered so far */
78 			__free_page(buf->pages[last_page]);
		/* turn the order-N allocation into 2^N independent pages */
84 		split_page(pages, order);
85 		for (i = 0; i < (1 << order); i++)
86 			buf->pages[last_page++] = &pages[i];
88 		size -= PAGE_SIZE << order;
/*
 * MMAP-type allocation: grab pages, build an sg_table over them, DMA-map
 * the table against the context's device, and set up the refcount handler.
 * Returns the opaque buf pointer on success; the error paths unwind in
 * reverse order via the fail_* labels (goto-cleanup pattern).
 */
94 static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
95 	enum dma_data_direction dma_dir, gfp_t gfp_flags)
97 	struct vb2_dma_sg_conf *conf = alloc_ctx;
98 	struct vb2_dma_sg_buf *buf;
	/* a NULL context is a caller bug; WARN loudly rather than oops later */
103 	if (WARN_ON(alloc_ctx == NULL))
105 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
110 	buf->dma_dir = dma_dir;
113 	/* size is already page aligned */
114 	buf->num_pages = size >> PAGE_SHIFT;
	/* array of page pointers filled by vb2_dma_sg_alloc_compacted() */
116 	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
119 		goto fail_pages_array_alloc;
121 	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
123 		goto fail_pages_alloc;
125 	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
126 			buf->num_pages, 0, size, GFP_KERNEL);
128 		goto fail_table_alloc;
130 	/* Prevent the device from being released while the buffer is used */
131 	buf->dev = get_device(conf->dev);
133 	sgt = &buf->sg_table;
	/* dma_map_sg() returns 0 on mapping failure */
134 	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
	/* hand ownership of the data back to the CPU until queueing time */
136 	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	/* wire up the vm_area refcount handler used by the mmap path */
138 	buf->handler.refcount = &buf->refcount;
139 	buf->handler.put = vb2_dma_sg_put;
140 	buf->handler.arg = buf;
142 	atomic_inc(&buf->refcount);
144 	dprintk(1, "%s: Allocated buffer of %d pages\n",
145 		__func__, buf->num_pages);
	/* ---- error unwind, reverse order of acquisition ---- */
149 	put_device(buf->dev);
152 	num_pages = buf->num_pages;
154 		__free_page(buf->pages[num_pages]);
157 fail_pages_array_alloc:
/*
 * Drop one reference to an MMAP buffer; on the last put, unmap the
 * sg list, tear down any kernel mapping, free every page, and release
 * the device reference taken in vb2_dma_sg_alloc().
 */
162 static void vb2_dma_sg_put(void *buf_priv)
164 	struct vb2_dma_sg_buf *buf = buf_priv;
165 	struct sg_table *sgt = &buf->sg_table;
	/* page index counts down from num_pages in the free loop below */
166 	int i = buf->num_pages;
168 	if (atomic_dec_and_test(&buf->refcount)) {
169 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
171 		dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
		/* only if vb2_dma_sg_vaddr() ever created a kernel mapping */
173 			vm_unmap_ram(buf->vaddr, buf->num_pages);
174 		sg_free_table(&buf->sg_table);
176 			__free_page(buf->pages[i]);
178 		put_device(buf->dev);
/*
 * .prepare op: hand the buffer to the device before DMA
 * (flush/clean CPU caches for the whole sg list).
 */
183 static void vb2_dma_sg_prepare(void *buf_priv)
185 	struct vb2_dma_sg_buf *buf = buf_priv;
186 	struct sg_table *sgt = &buf->sg_table;
188 	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
/*
 * .finish op: give the buffer back to the CPU after DMA completes
 * (invalidate CPU caches so the CPU sees the device's writes).
 */
191 static void vb2_dma_sg_finish(void *buf_priv)
193 	struct vb2_dma_sg_buf *buf = buf_priv;
194 	struct sg_table *sgt = &buf->sg_table;
196 	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
/*
 * True if the VMA maps device memory / raw PFNs (VM_IO or VM_PFNMAP):
 * such ranges have no struct page refcounting, so the userptr code
 * must use follow_pfn() instead of get_user_pages().
 */
199 static inline int vma_is_io(struct vm_area_struct *vma)
201 	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
/*
 * USERPTR acquisition: pin (or PFN-resolve) the user pages spanning
 * [vaddr, vaddr+size), build an sg_table over them and DMA-map it.
 * I/O VMAs (VM_IO/VM_PFNMAP) are walked with follow_pfn(); normal
 * memory is pinned with get_user_pages(). Errors unwind through the
 * userptr_fail_* labels in reverse order.
 */
204 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
206 	enum dma_data_direction dma_dir)
208 	struct vb2_dma_sg_conf *conf = alloc_ctx;
209 	struct vb2_dma_sg_buf *buf;
210 	unsigned long first, last;
211 	int num_pages_from_user;
212 	struct vm_area_struct *vma;
213 	struct sg_table *sgt;
215 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
220 	buf->dev = conf->dev;
221 	buf->dma_dir = dma_dir;
	/* user pointer need not be page aligned; remember the in-page offset */
222 	buf->offset = vaddr & ~PAGE_MASK;
	/* first/last page indices covering the user range */
225 	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
226 	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
227 	buf->num_pages = last - first + 1;
229 	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
232 		goto userptr_fail_alloc_pages;
	/* NOTE(review): mmap_sem is presumably held around this walk on elided lines */
234 	vma = find_vma(current->mm, vaddr);
236 		dprintk(1, "no vma for address %lu\n", vaddr);
237 		goto userptr_fail_find_vma;
	/* the whole range must live inside one VMA */
240 	if (vma->vm_end < vaddr + size) {
241 		dprintk(1, "vma at %lu is too small for %lu bytes\n",
243 		goto userptr_fail_find_vma;
	/* keep a private copy of the vma so it outlives the caller's mapping */
246 	buf->vma = vb2_get_vma(vma);
248 		dprintk(1, "failed to copy vma\n");
249 		goto userptr_fail_find_vma;
252 	if (vma_is_io(buf->vma)) {
		/* device memory: no struct-page pinning, translate PFNs directly */
253 		for (num_pages_from_user = 0;
254 		     num_pages_from_user < buf->num_pages;
255 		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
258 			if (follow_pfn(vma, vaddr, &pfn)) {
259 				dprintk(1, "no page for address %lu\n", vaddr);
262 			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		/* normal memory: pin pages; write access needed for capture */
265 		num_pages_from_user = get_user_pages(current, current->mm,
268 					     buf->dma_dir == DMA_FROM_DEVICE,
	/* partial pin counts as failure */
273 	if (num_pages_from_user != buf->num_pages)
274 		goto userptr_fail_get_user_pages;
	/* GFP 0 here — NOTE(review): upstream likely passes GFP_KERNEL; confirm */
276 	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
277 			buf->num_pages, buf->offset, size, 0))
278 		goto userptr_fail_alloc_table_from_pages;
280 	sgt = &buf->sg_table;
281 	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
282 		goto userptr_fail_map;
283 	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	/* ---- error unwind ---- */
287 	sg_free_table(&buf->sg_table);
288 userptr_fail_alloc_table_from_pages:
289 userptr_fail_get_user_pages:
290 	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
291 			buf->num_pages, num_pages_from_user);
	/* pinned pages must be released; PFN-mapped ones were never pinned */
292 	if (!vma_is_io(buf->vma))
293 		while (--num_pages_from_user >= 0)
294 			put_page(buf->pages[num_pages_from_user]);
295 	vb2_put_vma(buf->vma);
296 userptr_fail_find_vma:
298 userptr_fail_alloc_pages:
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used. Unmaps the sg list, drops any kernel mapping, marks pinned
 * pages dirty after a capture (DMA_FROM_DEVICE), unpins them, and
 * releases the private vma copy.
 */
307 static void vb2_dma_sg_put_userptr(void *buf_priv)
309 	struct vb2_dma_sg_buf *buf = buf_priv;
310 	struct sg_table *sgt = &buf->sg_table;
	/* page index counts down over buf->pages[] in the loop below */
311 	int i = buf->num_pages;
313 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
314 	       __func__, buf->num_pages);
315 	dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	/* only if vb2_dma_sg_vaddr() ever created a kernel mapping */
317 		vm_unmap_ram(buf->vaddr, buf->num_pages);
318 	sg_free_table(&buf->sg_table);
	/* the device wrote into these pages; tell the VM they are dirty */
320 		if (buf->dma_dir == DMA_FROM_DEVICE)
321 			set_page_dirty_lock(buf->pages[i]);
	/* PFN-mapped (I/O) pages were never pinned, so nothing to put */
322 		if (!vma_is_io(buf->vma))
323 			put_page(buf->pages[i]);
326 	vb2_put_vma(buf->vma);
/*
 * .vaddr op: lazily map the buffer's pages into kernel virtual address
 * space (vm_map_ram) on first use and return a pointer adjusted by the
 * userptr sub-page offset.
 */
330 static void *vb2_dma_sg_vaddr(void *buf_priv)
332 	struct vb2_dma_sg_buf *buf = buf_priv;
337 		buf->vaddr = vm_map_ram(buf->pages,
342 	/* add offset in case userptr is not page-aligned */
343 	return buf->vaddr + buf->offset;
/* .num_users op: current reference count of the buffer. */
346 static unsigned int vb2_dma_sg_num_users(void *buf_priv)
348 	struct vb2_dma_sg_buf *buf = buf_priv;
350 	return atomic_read(&buf->refcount);
/*
 * .mmap op: insert the buffer's pages one by one into the userspace
 * VMA and hook up the common vm_ops so the mapping holds a reference
 * on the buffer for its lifetime.
 */
353 static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
355 	struct vb2_dma_sg_buf *buf = buf_priv;
356 	unsigned long uaddr = vma->vm_start;
357 	unsigned long usize = vma->vm_end - vma->vm_start;
361 		printk(KERN_ERR "No memory to map\n");
	/* map page i at the next user address; loop bounds are on elided lines */
368 		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
370 			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
380 	 * Use common vm_area operations to track buffer refcount.
382 	vma->vm_private_data = &buf->handler;
383 	vma->vm_ops = &vb2_common_vm_ops;
	/* take the initial mapping reference via the common open() */
385 	vma->vm_ops->open(vma);
/* .cookie op: expose the sg_table so drivers can program their DMA engines. */
390 static void *vb2_dma_sg_cookie(void *buf_priv)
392 	struct vb2_dma_sg_buf *buf = buf_priv;
394 	return &buf->sg_table;
/* The vb2 memory-ops vtable exported for drivers using SG buffers. */
397 const struct vb2_mem_ops vb2_dma_sg_memops = {
398 	.alloc		= vb2_dma_sg_alloc,
399 	.put		= vb2_dma_sg_put,
400 	.get_userptr	= vb2_dma_sg_get_userptr,
401 	.put_userptr	= vb2_dma_sg_put_userptr,
402 	.prepare	= vb2_dma_sg_prepare,
403 	.finish		= vb2_dma_sg_finish,
404 	.vaddr		= vb2_dma_sg_vaddr,
405 	.mmap		= vb2_dma_sg_mmap,
406 	.num_users	= vb2_dma_sg_num_users,
407 	.cookie		= vb2_dma_sg_cookie,
409 EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
/*
 * Allocate an allocator context bound to @dev.
 * Returns ERR_PTR(-ENOMEM) on allocation failure; free with
 * vb2_dma_sg_cleanup_ctx().
 */
411 void *vb2_dma_sg_init_ctx(struct device *dev)
413 	struct vb2_dma_sg_conf *conf;
415 	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
417 		return ERR_PTR(-ENOMEM);
423 EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);
/*
 * Free a context created by vb2_dma_sg_init_ctx(); safe to call with
 * NULL or an ERR_PTR value.
 */
425 void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
427 	if (!IS_ERR_OR_NULL(alloc_ctx))
430 EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
432 MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
433 MODULE_AUTHOR("Andrzej Pietrasiewicz");
434 MODULE_LICENSE("GPL");