/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>

#include <uapi/linux/dma-buf.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
        struct list_head head;
        struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        BUG_ON(dmabuf->vmapping_counter);

        /*
         * Any fences that a dma-buf poll can wait on should be signaled
         * before releasing the dma-buf. This is the responsibility of each
         * driver that uses the reservation objects.
         *
         * If you hit this BUG() it means someone dropped their ref to the
         * dma-buf while still having pending operations on the buffer.
         */
        BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

        dmabuf->ops->release(dmabuf);

        mutex_lock(&db_list.lock);
        list_del(&dmabuf->list_node);
        mutex_unlock(&db_list.lock);

        if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
                reservation_object_fini(dmabuf->resv);

        module_put(dmabuf->owner);
        kfree(dmabuf);
        return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
        struct dma_buf *dmabuf;
        loff_t base;

        if (!is_dma_buf_file(file))
                return -EBADF;

        dmabuf = file->private_data;

        /*
         * Only support discovering the end of the buffer, but also allow
         * SEEK_SET to maintain the idiomatic SEEK_END(0), SEEK_CUR(0)
         * pattern.
         */
        if (whence == SEEK_END)
                base = dmabuf->size;
        else if (whence == SEEK_SET)
                base = 0;
        else
                return -EINVAL;

        if (offset != 0)
                return -EINVAL;

        return base + offset;
}
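
/*
 * Usage sketch (hypothetical userspace caller, not part of this file):
 * the llseek support above lets an importer discover a buffer's size
 * without a dedicated ioctl, e.g.:
 *
 *      off_t size = lseek(dmabuf_fd, 0, SEEK_END);
 *      lseek(dmabuf_fd, 0, SEEK_SET);
 */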

static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
{
        struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
        unsigned long flags;

        spin_lock_irqsave(&dcb->poll->lock, flags);
        wake_up_locked_poll(dcb->poll, dcb->active);
        dcb->active = 0;
        spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
        struct dma_buf *dmabuf;
        struct reservation_object *resv;
        struct reservation_object_list *fobj;
        struct fence *fence_excl;
        unsigned long events;
        unsigned shared_count, seq;

        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
                return POLLERR;

        resv = dmabuf->resv;

        poll_wait(file, &dmabuf->poll, poll);

        events = poll_requested_events(poll) & (POLLIN | POLLOUT);
        if (!events)
                return 0;

retry:
        seq = read_seqcount_begin(&resv->seq);
        rcu_read_lock();

        fobj = rcu_dereference(resv->fence);
        if (fobj)
                shared_count = fobj->shared_count;
        else
                shared_count = 0;
        fence_excl = rcu_dereference(resv->fence_excl);
        if (read_seqcount_retry(&resv->seq, seq)) {
                rcu_read_unlock();
                goto retry;
        }

        if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
                unsigned long pevents = POLLIN;

                if (shared_count == 0)
                        pevents |= POLLOUT;

                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active) {
                        dcb->active |= pevents;
                        events &= ~pevents;
                } else
                        dcb->active = pevents;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (events & pevents) {
                        if (!fence_get_rcu(fence_excl)) {
                                /* force a recheck */
                                events &= ~pevents;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        } else if (!fence_add_callback(fence_excl, &dcb->cb,
                                                       dma_buf_poll_cb)) {
                                events &= ~pevents;
                                fence_put(fence_excl);
                        } else {
                                /*
                                 * No callback queued, wake up any additional
                                 * waiters.
                                 */
                                fence_put(fence_excl);
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        }
                }
        }

        if ((events & POLLOUT) && shared_count > 0) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
                int i;

                /* Only queue a new callback if no event has fired yet */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
                        events &= ~POLLOUT;
                else
                        dcb->active = POLLOUT;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (!(events & POLLOUT))
                        goto out;

                for (i = 0; i < shared_count; ++i) {
                        struct fence *fence = rcu_dereference(fobj->shared[i]);

                        if (!fence_get_rcu(fence)) {
                                /*
                                 * fence refcount dropped to zero, this means
                                 * that fobj has been freed
                                 *
                                 * call dma_buf_poll_cb and force a recheck!
                                 */
                                events &= ~POLLOUT;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                                break;
                        }
                        if (!fence_add_callback(fence, &dcb->cb,
                                                dma_buf_poll_cb)) {
                                fence_put(fence);
                                events &= ~POLLOUT;
                                break;
                        }
                        fence_put(fence);
                }

                /* No callback queued, wake up any additional waiters. */
                if (i == shared_count)
                        dma_buf_poll_cb(NULL, &dcb->cb);
        }

out:
        rcu_read_unlock();
        return events;
}

static long dma_buf_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct dma_buf *dmabuf;
        struct dma_buf_sync sync;
        enum dma_data_direction direction;
        int ret;

        dmabuf = file->private_data;

        switch (cmd) {
        case DMA_BUF_IOCTL_SYNC:
                if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
                        return -EFAULT;

                if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
                        return -EINVAL;

                switch (sync.flags & DMA_BUF_SYNC_RW) {
                case DMA_BUF_SYNC_READ:
                        direction = DMA_FROM_DEVICE;
                        break;
                case DMA_BUF_SYNC_WRITE:
                        direction = DMA_TO_DEVICE;
                        break;
                case DMA_BUF_SYNC_RW:
                        direction = DMA_BIDIRECTIONAL;
                        break;
                default:
                        return -EINVAL;
                }

                if (sync.flags & DMA_BUF_SYNC_END)
                        ret = dma_buf_end_cpu_access(dmabuf, direction);
                else
                        ret = dma_buf_begin_cpu_access(dmabuf, direction);

                return ret;
        default:
                return -ENOTTY;
        }
}
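
/*
 * Usage sketch (hypothetical userspace CPU access; error handling
 * omitted): reads or writes through an mmap()ed dma-buf should be
 * bracketed by DMA_BUF_IOCTL_SYNC calls so the exporter can manage
 * cache coherency:
 *
 *      struct dma_buf_sync sync = {
 *              .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
 *      };
 *
 *      ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *      (CPU reads through the mapping here)
 *      sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
 *      ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */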

static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
        .unlocked_ioctl = dma_buf_ioctl,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
        return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:   [in]    holds all the export related information provided
 *                      by the exporter. See struct dma_buf_export_info
 *                      for further details.
 *
 * On success, returns a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On missing ops or
 * failure to allocate the struct dma_buf, returns a negative error wrapped
 * in ERR_PTR().
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
        struct dma_buf *dmabuf;
        struct reservation_object *resv = exp_info->resv;
        struct file *file;
        size_t alloc_size = sizeof(struct dma_buf);
        int ret;

        if (!exp_info->resv)
                alloc_size += sizeof(struct reservation_object);
        else
                /* prevent &dma_buf[1] == dma_buf->resv */
                alloc_size += 1;

        if (WARN_ON(!exp_info->priv
                          || !exp_info->ops
                          || !exp_info->ops->map_dma_buf
                          || !exp_info->ops->unmap_dma_buf
                          || !exp_info->ops->release
                          || !exp_info->ops->kmap_atomic
                          || !exp_info->ops->kmap
                          || !exp_info->ops->mmap)) {
                return ERR_PTR(-EINVAL);
        }

        if (!try_module_get(exp_info->owner))
                return ERR_PTR(-ENOENT);

        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (!dmabuf) {
                ret = -ENOMEM;
                goto err_module;
        }

        dmabuf->priv = exp_info->priv;
        dmabuf->ops = exp_info->ops;
        dmabuf->size = exp_info->size;
        dmabuf->exp_name = exp_info->exp_name;
        dmabuf->owner = exp_info->owner;
        init_waitqueue_head(&dmabuf->poll);
        dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
        dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

        if (!resv) {
                resv = (struct reservation_object *)&dmabuf[1];
                reservation_object_init(resv);
        }
        dmabuf->resv = resv;

        file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
                                        exp_info->flags);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto err_dmabuf;
        }

        file->f_mode |= FMODE_LSEEK;
        dmabuf->file = file;

        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);

        mutex_lock(&db_list.lock);
        list_add(&dmabuf->list_node, &db_list.head);
        mutex_unlock(&db_list.lock);

        return dmabuf;

err_dmabuf:
        kfree(dmabuf);
err_module:
        module_put(exp_info->owner);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
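
/*
 * Usage sketch (hypothetical exporter; my_dmabuf_ops and my_obj are
 * illustrative names, not part of this file):
 *
 *      DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *      struct dma_buf *dmabuf;
 *
 *      exp_info.ops = &my_dmabuf_ops;
 *      exp_info.size = my_obj->size;
 *      exp_info.flags = O_CLOEXEC;
 *      exp_info.priv = my_obj;
 *
 *      dmabuf = dma_buf_export(&exp_info);
 *      if (IS_ERR(dmabuf))
 *              return PTR_ERR(dmabuf);
 */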

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:     [in]    pointer to dma_buf for which fd is required.
 * @flags:      [in]    flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
        int fd;

        if (!dmabuf || !dmabuf->file)
                return -EINVAL;

        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                return fd;

        fd_install(fd, dmabuf->file);

        return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
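
/*
 * Usage sketch: a typical export path hands the freshly exported buffer
 * to userspace as a file descriptor (hypothetical; the caller's further
 * unwinding is elided):
 *
 *      fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *      if (fd < 0)
 *              dma_buf_put(dmabuf);
 */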

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd: [in]    fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * the file's refcounting done by fget() to increase the refcount. Returns
 * ERR_PTR otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
        struct file *file;

        file = fget(fd);

        if (!file)
                return ERR_PTR(-EBADF);

        if (!is_dma_buf_file(file)) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:     [in]    buffer to reduce refcount of
 *
 * Uses the file's refcounting done implicitly by fput().
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf || !dmabuf->file))
                return;

        fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:     [in]    buffer to attach device to.
 * @dev:        [in]    device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
 * error.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev)
{
        struct dma_buf_attachment *attach;
        int ret;

        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);

        attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
        if (attach == NULL)
                return ERR_PTR(-ENOMEM);

        attach->dev = dev;
        attach->dmabuf = dmabuf;

        mutex_lock(&dmabuf->lock);

        if (dmabuf->ops->attach) {
                ret = dmabuf->ops->attach(dmabuf, dev, attach);
                if (ret)
                        goto err_attach;
        }
        list_add(&attach->node, &dmabuf->attachments);

        mutex_unlock(&dmabuf->lock);
        return attach;

err_attach:
        kfree(attach);
        mutex_unlock(&dmabuf->lock);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:     [in]    buffer to detach from.
 * @attach:     [in]    attachment to be detached; is freed after this call.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        if (WARN_ON(!dmabuf || !attach))
                return;

        mutex_lock(&dmabuf->lock);
        list_del(&attach->node);
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);

        mutex_unlock(&dmabuf->lock);
        kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:     [in]    attachment whose scatterlist is to be returned
 * @direction:  [in]    direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                        enum dma_data_direction direction)
{
        struct sg_table *sg_table = ERR_PTR(-EINVAL);

        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);

        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
        if (!sg_table)
                sg_table = ERR_PTR(-ENOMEM);

        return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the associated scatterlist. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:     [in]    attachment to unmap buffer from
 * @sg_table:   [in]    scatterlist info of the buffer to unmap
 * @direction:  [in]    direction of DMA transfer
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
                                struct sg_table *sg_table,
                                enum dma_data_direction direction)
{
        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;

        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
                                                direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
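
/*
 * Usage sketch (hypothetical importer; error handling elided, though each
 * step can fail and must be unwound in reverse order):
 *
 *      dmabuf = dma_buf_get(fd);
 *      attach = dma_buf_attach(dmabuf, dev);
 *      sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *      (device DMA to/from the pages described by sgt)
 *
 *      dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *      dma_buf_detach(dmabuf, attach);
 *      dma_buf_put(dmabuf);
 */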

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed for the specified access
 * direction.
 * @dmabuf:     [in]    buffer to prepare cpu access for.
 * @direction:  [in]    direction of cpu access (read, write or both).
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                             enum dma_data_direction direction)
{
        int ret = 0;

        if (WARN_ON(!dmabuf))
                return -EINVAL;

        if (dmabuf->ops->begin_cpu_access)
                ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:     [in]    buffer to complete cpu access for.
 * @direction:  [in]    direction of cpu access (read, write or both).
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                           enum dma_data_direction direction)
{
        int ret = 0;

        WARN_ON(!dmabuf);

        if (dmabuf->ops->end_cpu_access)
                ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
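
/*
 * Usage sketch (hypothetical in-kernel CPU read; page_num is an
 * illustrative variable): bracket the page access with begin/end so the
 * exporter can flush or invalidate caches as needed:
 *
 *      ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *      if (ret)
 *              return ret;
 *
 *      vaddr = dma_buf_kmap(dmabuf, page_num);
 *      (read the page through vaddr)
 *      dma_buf_kunmap(dmabuf, page_num, vaddr);
 *
 *      dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */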

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:     [in]    buffer to map page from.
 * @page_num:   [in]    page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:     [in]    buffer to unmap page from.
 * @page_num:   [in]    page in PAGE_SIZE units to unmap.
 * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
                           void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap_atomic)
                dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:     [in]    buffer to map page from.
 * @page_num:   [in]    page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:     [in]    buffer to unmap page from.
 * @page_num:   [in]    page in PAGE_SIZE units to unmap.
 * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
                    void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap)
                dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:     [in]    buffer that should back the vma
 * @vma:        [in]    vma for the mmap
 * @pgoff:      [in]    offset in pages where this mmap should start within the
 *                      dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
{
        struct file *oldfile;
        int ret;

        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;

        /* check for offset overflow */
        if (pgoff + vma_pages(vma) < pgoff)
                return -EOVERFLOW;

        /* check for overflowing the buffer's size */
        if (pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        /* readjust the vma */
        get_file(dmabuf->file);
        oldfile = vma->vm_file;
        vma->vm_file = dmabuf->file;
        vma->vm_pgoff = pgoff;

        ret = dmabuf->ops->mmap(dmabuf, vma);
        if (ret) {
                /* restore old parameters on failure */
                vma->vm_file = oldfile;
                fput(dmabuf->file);
        } else {
                if (oldfile)
                        fput(oldfile);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
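
/*
 * Usage sketch (hypothetical driver that backs its own char device mmap
 * with a dma-buf; my_obj is an illustrative type, not part of this file):
 *
 *      static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct my_obj *obj = file->private_data;
 *
 *              return dma_buf_mmap(obj->dmabuf, vma, 0);
 *      }
 */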

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:     [in]    buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * This interface is optional for exporters to implement. It is intended
 * for mapping objects linearly into kernel address space when they are
 * accessed frequently. Please attempt to use kmap/kunmap before thinking
 * about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
        void *ptr;

        if (WARN_ON(!dmabuf))
                return NULL;

        if (!dmabuf->ops->vmap)
                return NULL;

        mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                dmabuf->vmapping_counter++;
                BUG_ON(!dmabuf->vmap_ptr);
                ptr = dmabuf->vmap_ptr;
                goto out_unlock;
        }

        BUG_ON(dmabuf->vmap_ptr);

        ptr = dmabuf->ops->vmap(dmabuf);
        if (WARN_ON_ONCE(IS_ERR(ptr)))
                ptr = NULL;
        if (!ptr)
                goto out_unlock;

        dmabuf->vmap_ptr = ptr;
        dmabuf->vmapping_counter = 1;

out_unlock:
        mutex_unlock(&dmabuf->lock);
        return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:     [in]    buffer to vunmap
 * @vaddr:      [in]    vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        if (WARN_ON(!dmabuf))
                return;

        BUG_ON(!dmabuf->vmap_ptr);
        BUG_ON(dmabuf->vmapping_counter == 0);
        BUG_ON(dmabuf->vmap_ptr != vaddr);

        mutex_lock(&dmabuf->lock);
        if (--dmabuf->vmapping_counter == 0) {
                if (dmabuf->ops->vunmap)
                        dmabuf->ops->vunmap(dmabuf, vaddr);
                dmabuf->vmap_ptr = NULL;
        }
        mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
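
/*
 * Usage sketch (hypothetical): when a buffer is accessed frequently and
 * in full, a single linear mapping beats repeated per-page kmaps:
 *
 *      vaddr = dma_buf_vmap(dmabuf);
 *      if (!vaddr)
 *              return -ENOMEM;
 *      (access the whole buffer linearly through vaddr)
 *      dma_buf_vunmap(dmabuf, vaddr);
 */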

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
        int ret;
        struct dma_buf *buf_obj;
        struct dma_buf_attachment *attach_obj;
        int count = 0, attach_count;
        size_t size = 0;

        ret = mutex_lock_interruptible(&db_list.lock);

        if (ret)
                return ret;

        seq_puts(s, "\nDma-buf Objects:\n");
        seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");

        list_for_each_entry(buf_obj, &db_list.head, list_node) {
                ret = mutex_lock_interruptible(&buf_obj->lock);

                if (ret) {
                        seq_puts(s,
                                 "\tERROR locking buffer object: skipping\n");
                        continue;
                }

                seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
                                buf_obj->size,
                                buf_obj->file->f_flags, buf_obj->file->f_mode,
                                file_count(buf_obj->file),
                                buf_obj->exp_name);

                seq_puts(s, "\tAttached Devices:\n");
                attach_count = 0;

                list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
                        seq_puts(s, "\t");

                        seq_printf(s, "%s\n", dev_name(attach_obj->dev));
                        attach_count++;
                }

                seq_printf(s, "Total %d devices attached\n\n",
                                attach_count);

                count++;
                size += buf_obj->size;
                mutex_unlock(&buf_obj->lock);
        }

        seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

        mutex_unlock(&db_list.lock);
        return 0;
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, dma_buf_debug_show, NULL);
}

static const struct file_operations dma_buf_debug_fops = {
        .open           = dma_buf_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
        struct dentry *d;
        int err = 0;

        d = debugfs_create_dir("dma_buf", NULL);
        if (IS_ERR(d))
                return PTR_ERR(d);

        dma_buf_debugfs_dir = d;

        d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
                                NULL, &dma_buf_debug_fops);
        if (IS_ERR(d)) {
                pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
                debugfs_remove_recursive(dma_buf_debugfs_dir);
                dma_buf_debugfs_dir = NULL;
                err = PTR_ERR(d);
        }

        return err;
}

static void dma_buf_uninit_debugfs(void)
{
        if (dma_buf_debugfs_dir)
                debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
        return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
        mutex_init(&db_list.lock);
        INIT_LIST_HEAD(&db_list.head);
        dma_buf_init_debugfs();
        return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
        dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);