/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>
struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};
/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
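/*
 * Call @cb on every struct page backing @sgt. The walk uses orig_nents
 * (the CPU-side entry count), so each backing page is visited exactly
 * once regardless of how the list was coalesced for DMA.
 */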
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}
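/*
 * Return the size of the initial run of the mapped scatterlist that is
 * contiguous in DMA address space; the walk stops at the first gap.
 */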
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/
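/*
 * The "cookie" handed back to drivers is a pointer to the buffer's DMA
 * address; drivers typically retrieve it through the
 * vb2_dma_contig_plane_dma_addr() helper in videobuf2-dma-contig.h.
 */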
static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}
static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/
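/*
 * Drop one reference to the buffer. The last put releases the exported
 * sg table (if one was created), the coherent allocation, and the
 * device reference taken in vb2_dc_alloc().
 */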
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/
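/*
 * Each importer gets its own copy of the exporter's scatter list,
 * together with the direction it is currently mapped in (DMA_NONE
 * while unmapped), so several devices can attach at the same time.
 */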
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}
static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}
static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}
static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}
static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}
static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}
static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/
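/*
 * VM_IO | VM_PFNMAP mappings have no struct page behind them, so they
 * cannot be pinned with get_user_pages(); the helpers below fall back
 * to walking pfns for such vmas.
 */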
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
	struct vm_area_struct *vma, unsigned long *res)
{
	unsigned long pfn, start_pfn, prev_pfn;
	unsigned int i;
	int ret;

	if (!vma_is_io(vma))
		return -EFAULT;

	ret = follow_pfn(vma, start, &pfn);
	if (ret)
		return ret;

	start_pfn = pfn;
	start += PAGE_SIZE;

	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
		prev_pfn = pfn;
		ret = follow_pfn(vma, start, &pfn);

		if (ret) {
			pr_err("no page for address %lu\n", start);
			return ret;
		}
		if (pfn != prev_pfn + 1)
			return -EINVAL;
	}

	*res = start_pfn;
	return 0;
}
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}
static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (sgt) {
		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
		if (!vma_is_io(buf->vma))
			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_put_vma(buf->vma);
	kfree(buf);
}
/*
 * For some kinds of reserved memory there might be no struct page
 * available, so all that can be done to support such 'pages' is to try
 * to convert the pfn to a dma address or, as a last resort, assume that
 * dma address == physical address (as earlier versions of
 * videobuf2-dma-contig did).
 */
#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}
	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		unsigned long pfn;
		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
			buf->size = size;
			kfree(pages);
			return buf;
		}

		pr_err("failed to get user pages\n");
		goto fail_vma;
	}
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}
/*********************************************/
/*        callbacks for DMABUF buffers       */
/*********************************************/
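/*
 * Pin an attached dmabuf for DMA: fetch the exporter's scatterlist and
 * verify it is contiguous for the whole buffer size before publishing
 * its base address in buf->dma_addr.
 */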
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}
static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}
static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/
const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
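/*
 * Example usage (a minimal sketch, not part of this allocator): a
 * driver typically selects this allocator when initializing its
 * vb2_queue and passes the context back from its queue_setup()
 * callback. "my_dev", "pdev" and "q" are placeholder names.
 *
 *	my_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(my_dev->alloc_ctx))
 *		return PTR_ERR(my_dev->alloc_ctx);
 *	q->mem_ops = &vb2_dma_contig_memops;
 *
 * In queue_setup(), the driver sets alloc_ctxs[0] = my_dev->alloc_ctx;
 * once the queue is released, vb2_dma_contig_cleanup_ctx() frees the
 * context. Each plane's bus address is then obtained with
 * vb2_dma_contig_plane_dma_addr().
 */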
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");