/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device           *dev;
        struct dma_attrs        attrs;
};

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        void                            *cookie;
        dma_addr_t                      dma_addr;
        struct dma_attrs                attrs;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;
        struct frame_vector             *vec;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;
        struct sg_table                 *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}
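/*
 * Worked example (illustrative values only, not from this file): for a
 * mapped table with entries {addr 0x1000, len 0x1000},
 * {addr 0x2000, len 0x1000}, {addr 0x4000, len 0x1000}, the walk above
 * accumulates the first two entries (0x2000 bytes) and stops at the gap
 * before 0x4000, so only the leading contiguous run counts towards the
 * returned size.
 */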

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
                               buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
                        &buf->attrs);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
                          enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->attrs = conf->attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
                                        GFP_KERNEL | gfp_flags, &buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_attrs of size %lu failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
                buf->vaddr = buf->cookie;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
         * map the whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
                buf->dma_addr, buf->size, &buf->attrs);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
                buf->size, &buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                DEFINE_DMA_ATTRS(attrs);

                dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                   buf->dma_dir, &attrs);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                for (i = 0; i < frame_vector_count(buf->vec); i++)
                        set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert a
 * pfn to a dma address or, as a last resort, just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned long offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = dma_dir;

        offset = vaddr & ~PAGE_MASK;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check the memory is physically
                 * contiguous and use direct mapping
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, &attrs);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                           buf->dma_dir, &attrs);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu bytes\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx_attrs(struct device *dev,
                                    struct dma_attrs *attrs)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;
        if (attrs)
                conf->attrs = *attrs;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx_attrs);
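/*
 * Usage sketch (illustrative, not from this file): a driver typically
 * creates one allocation context per device in its probe path and hands
 * it to the vb2 queue. The attrs argument is optional; here it is used
 * to skip the kernel mapping for buffers the CPU never touches, an
 * attribute vb2_dc_alloc() honours above. The names foo and
 * foo->alloc_ctx are hypothetical.
 *
 *      DEFINE_DMA_ATTRS(attrs);
 *
 *      dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
 *      foo->alloc_ctx = vb2_dma_contig_init_ctx_attrs(&pdev->dev, &attrs);
 *      if (IS_ERR(foo->alloc_ctx))
 *              return PTR_ERR(foo->alloc_ctx);
 *      ...
 *      vb2_dma_contig_cleanup_ctx(foo->alloc_ctx);     // on remove
 */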

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        if (!IS_ERR_OR_NULL(alloc_ctx))
                kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:        device for configuring DMA parameters
 * @size:       size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left at the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers which are known to
 * operate on platforms with an IOMMU and provide access to shared
 * buffers (either USERPTR or DMABUF). This should be done before
 * initializing the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (!dev->dma_parms) {
                dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
                if (!dev->dma_parms)
                        return -ENOMEM;
        }
        if (dma_get_max_seg_size(dev) < size)
                return dma_set_max_seg_size(dev, size);

        return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
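/*
 * Usage sketch (illustrative, not from this file): a driver for an
 * IOMMU-backed device raises the limit in probe and releases the
 * allocated dma_parms in remove. The name foo_probe is hypothetical;
 * callers commonly pass a bound such as DMA_BIT_MASK(32).
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              int ret;
 *
 *              ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *                                                    DMA_BIT_MASK(32));
 *              if (ret)
 *                      return ret;
 *              ...
 *      }
 *
 *      // in the driver's remove path:
 *      vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 */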

/**
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:        device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
        kfree(dev->dma_parms);
        dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");