1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
22 #include <linux/fs.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
39
40 #include "ion.h"
41 #include "ion_priv.h"
42 #include "compat_ion.h"
43
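/*
 * Buffers that are CPU-cached but do not request explicit cache maintenance
 * (ION_FLAG_CACHED set, ION_FLAG_CACHED_NEEDS_SYNC clear) have their user
 * mappings faulted in page by page so that dirty pages can be tracked and
 * synced before the buffer is handed to a device (see
 * ion_buffer_sync_for_device() below).
 */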
44 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
45 {
46         return (buffer->flags & ION_FLAG_CACHED) &&
47                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
48 }
49
50 bool ion_buffer_cached(struct ion_buffer *buffer)
51 {
52         return !!(buffer->flags & ION_FLAG_CACHED);
53 }
54
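/*
 * For buffers that fault in their user mappings, a per-page dirty flag is
 * kept in bit 0 of each struct page pointer stored in buffer->pages (the
 * pointers are at least word aligned, so bit 0 is otherwise unused).  The
 * helpers below extract the real pointer and set, test, or clear that tag.
 */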
55 static inline struct page *ion_buffer_page(struct page *page)
56 {
57         return (struct page *)((unsigned long)page & ~(1UL));
58 }
59
60 static inline bool ion_buffer_page_is_dirty(struct page *page)
61 {
62         return !!((unsigned long)page & 1UL);
63 }
64
65 static inline void ion_buffer_page_dirty(struct page **page)
66 {
67         *page = (struct page *)((unsigned long)(*page) | 1UL);
68 }
69
70 static inline void ion_buffer_page_clean(struct page **page)
71 {
72         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
73 }
74
75 /* this function should only be called while dev->lock is held */
76 static void ion_buffer_add(struct ion_device *dev,
77                            struct ion_buffer *buffer)
78 {
79         struct rb_node **p = &dev->buffers.rb_node;
80         struct rb_node *parent = NULL;
81         struct ion_buffer *entry;
82
83         while (*p) {
84                 parent = *p;
85                 entry = rb_entry(parent, struct ion_buffer, node);
86
87                 if (buffer < entry) {
88                         p = &(*p)->rb_left;
89                 } else if (buffer > entry) {
90                         p = &(*p)->rb_right;
91                 } else {
92                         pr_err("%s: buffer already found.\n", __func__);
93                         BUG();
94                 }
95         }
96
97         rb_link_node(&buffer->node, parent, p);
98         rb_insert_color(&buffer->node, &dev->buffers);
99 }
100
101 /* this function should only be called while dev->lock is held */
102 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
103                                             struct ion_device *dev,
104                                             unsigned long len,
105                                             unsigned long align,
106                                             unsigned long flags)
107 {
108         struct ion_buffer *buffer;
109         struct sg_table *table;
110         struct scatterlist *sg;
111         int i, ret;
112
113         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
114         if (!buffer)
115                 return ERR_PTR(-ENOMEM);
116
117         buffer->heap = heap;
118         buffer->flags = flags;
119         kref_init(&buffer->ref);
120
121         ret = heap->ops->allocate(heap, buffer, len, align, flags);
122
123         if (ret) {
124                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
125                         goto err2;
126
127                 ion_heap_freelist_drain(heap, 0);
128                 ret = heap->ops->allocate(heap, buffer, len, align,
129                                           flags);
130                 if (ret)
131                         goto err2;
132         }
133
134         if (buffer->sg_table == NULL) {
135                 WARN_ONCE(1, "This heap needs to set the sgtable");
136                 ret = -EINVAL;
137                 goto err1;
138         }
139
140         table = buffer->sg_table;
141         buffer->dev = dev;
142         buffer->size = len;
143
144         if (ion_buffer_fault_user_mappings(buffer)) {
145                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
146                 struct scatterlist *sg;
147                 int i, j, k = 0;
148
149                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
150                 if (!buffer->pages) {
151                         ret = -ENOMEM;
152                         goto err1;
153                 }
154
155                 for_each_sg(table->sgl, sg, table->nents, i) {
156                         struct page *page = sg_page(sg);
157
158                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
159                                 buffer->pages[k++] = page++;
160                 }
161         }
162
163         buffer->dev = dev;
164         buffer->size = len;
165         INIT_LIST_HEAD(&buffer->vmas);
166         mutex_init(&buffer->lock);
167         /*
168          * This will set up dma addresses for the sglist -- it is not
169          * technically correct as per the dma api -- a specific
170          * device isn't really taking ownership here.  However, in practice on
171          * our systems the only dma_address space is physical addresses.
172          * Additionally, we can't afford the overhead of invalidating every
173          * allocation via dma_map_sg.  The implicit contract here is that
174          * memory coming from the heaps is ready for dma, i.e. if it has a
175          * cached mapping, that mapping has already been invalidated.
176          */
177         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
178                 sg_dma_address(sg) = sg_phys(sg);
179                 sg_dma_len(sg) = sg->length;
180         }
181         mutex_lock(&dev->buffer_lock);
182         ion_buffer_add(dev, buffer);
183         mutex_unlock(&dev->buffer_lock);
184         return buffer;
185
186 err1:
187         heap->ops->free(buffer);
188 err2:
189         kfree(buffer);
190         return ERR_PTR(ret);
191 }
192
193 void ion_buffer_destroy(struct ion_buffer *buffer)
194 {
195         if (WARN_ON(buffer->kmap_cnt > 0))
196                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
197         buffer->heap->ops->free(buffer);
198         vfree(buffer->pages);
199         kfree(buffer);
200 }
201
202 static void _ion_buffer_destroy(struct kref *kref)
203 {
204         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
205         struct ion_heap *heap = buffer->heap;
206         struct ion_device *dev = buffer->dev;
207
208         mutex_lock(&dev->buffer_lock);
209         rb_erase(&buffer->node, &dev->buffers);
210         mutex_unlock(&dev->buffer_lock);
211
212         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
213                 ion_heap_freelist_add(heap, buffer);
214         else
215                 ion_buffer_destroy(buffer);
216 }
217
218 static void ion_buffer_get(struct ion_buffer *buffer)
219 {
220         kref_get(&buffer->ref);
221 }
222
223 static int ion_buffer_put(struct ion_buffer *buffer)
224 {
225         return kref_put(&buffer->ref, _ion_buffer_destroy);
226 }
227
228 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
229 {
230         mutex_lock(&buffer->lock);
231         buffer->handle_count++;
232         mutex_unlock(&buffer->lock);
233 }
234
235 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
236 {
237         /*
238          * When a buffer is removed from a handle and is not in any
239          * other handle, copy the task comm and the pid of the process
240          * it is being removed from into the buffer.  At this point
241          * there will be no way to track what processes this buffer is
242          * being used by; it only exists as a dma_buf file descriptor.
243          * The task comm and pid can provide a debug hint as to where
244          * this fd is in the system.
245          */
246         mutex_lock(&buffer->lock);
247         buffer->handle_count--;
248         BUG_ON(buffer->handle_count < 0);
249         if (!buffer->handle_count) {
250                 struct task_struct *task;
251
252                 task = current->group_leader;
253                 get_task_comm(buffer->task_comm, task);
254                 buffer->pid = task_pid_nr(task);
255         }
256         mutex_unlock(&buffer->lock);
257 }
258
259 static struct ion_handle *ion_handle_create(struct ion_client *client,
260                                             struct ion_buffer *buffer)
261 {
262         struct ion_handle *handle;
263
264         handle = kzalloc(sizeof(*handle), GFP_KERNEL);
265         if (!handle)
266                 return ERR_PTR(-ENOMEM);
267         kref_init(&handle->ref);
268         RB_CLEAR_NODE(&handle->node);
269         handle->client = client;
270         ion_buffer_get(buffer);
271         ion_buffer_add_to_handle(buffer);
272         handle->buffer = buffer;
273
274         return handle;
275 }
276
277 static void ion_handle_kmap_put(struct ion_handle *);
278
279 static void ion_handle_destroy(struct kref *kref)
280 {
281         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
282         struct ion_client *client = handle->client;
283         struct ion_buffer *buffer = handle->buffer;
284
285         mutex_lock(&buffer->lock);
286         while (handle->kmap_cnt)
287                 ion_handle_kmap_put(handle);
288         mutex_unlock(&buffer->lock);
289
290         idr_remove(&client->idr, handle->id);
291         if (!RB_EMPTY_NODE(&handle->node))
292                 rb_erase(&handle->node, &client->handles);
293
294         ion_buffer_remove_from_handle(buffer);
295         ion_buffer_put(buffer);
296
297         kfree(handle);
298 }
299
300 static void ion_handle_get(struct ion_handle *handle)
301 {
302         kref_get(&handle->ref);
303 }
304
305 int ion_handle_put_nolock(struct ion_handle *handle)
306 {
307         return kref_put(&handle->ref, ion_handle_destroy);
308 }
309
310 int ion_handle_put(struct ion_handle *handle)
311 {
312         struct ion_client *client = handle->client;
313         int ret;
314
315         mutex_lock(&client->lock);
316         ret = ion_handle_put_nolock(handle);
317         mutex_unlock(&client->lock);
318
319         return ret;
320 }
321
322 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
323                                             struct ion_buffer *buffer)
324 {
325         struct rb_node *n = client->handles.rb_node;
326
327         while (n) {
328                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
329
330                 if (buffer < entry->buffer)
331                         n = n->rb_left;
332                 else if (buffer > entry->buffer)
333                         n = n->rb_right;
334                 else
335                         return entry;
336         }
337         return ERR_PTR(-EINVAL);
338 }
339
340 struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
341                                                int id)
342 {
343         struct ion_handle *handle;
344
345         handle = idr_find(&client->idr, id);
346         if (handle)
347                 ion_handle_get(handle);
348
349         return handle ? handle : ERR_PTR(-EINVAL);
350 }
351
352 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
353                                                int id)
354 {
355         struct ion_handle *handle;
356
357         mutex_lock(&client->lock);
358         handle = ion_handle_get_by_id_nolock(client, id);
359         mutex_unlock(&client->lock);
360
361         return handle;
362 }
363
364 static bool ion_handle_validate(struct ion_client *client,
365                                 struct ion_handle *handle)
366 {
367         WARN_ON(!mutex_is_locked(&client->lock));
368         return idr_find(&client->idr, handle->id) == handle;
369 }
370
371 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
372 {
373         int id;
374         struct rb_node **p = &client->handles.rb_node;
375         struct rb_node *parent = NULL;
376         struct ion_handle *entry;
377
378         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
379         if (id < 0)
380                 return id;
381
382         handle->id = id;
383
384         while (*p) {
385                 parent = *p;
386                 entry = rb_entry(parent, struct ion_handle, node);
387
388                 if (handle->buffer < entry->buffer)
389                         p = &(*p)->rb_left;
390                 else if (handle->buffer > entry->buffer)
391                         p = &(*p)->rb_right;
392                 else
393                         WARN(1, "%s: buffer already found.\n", __func__);
394         }
395
396         rb_link_node(&handle->node, parent, p);
397         rb_insert_color(&handle->node, &client->handles);
398
399         return 0;
400 }
401
402 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
403                              size_t align, unsigned int heap_id_mask,
404                              unsigned int flags)
405 {
406         struct ion_handle *handle;
407         struct ion_device *dev = client->dev;
408         struct ion_buffer *buffer = NULL;
409         struct ion_heap *heap;
410         int ret;
411
412         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
413                  len, align, heap_id_mask, flags);
414         /*
415          * Traverse the list of heaps available in this system in priority
416          * order.  If the heap type is supported by the client and matches the
417          * request of the caller, allocate from it.  Repeat until the allocation
418          * succeeds or all heaps have been tried.  (A usage sketch follows.)
419          */
420         len = PAGE_ALIGN(len);
421
422         if (!len)
423                 return ERR_PTR(-EINVAL);
424
425         down_read(&dev->lock);
426         plist_for_each_entry(heap, &dev->heaps, node) {
427                 /* if the caller didn't specify this heap id */
428                 if (!((1 << heap->id) & heap_id_mask))
429                         continue;
430                 buffer = ion_buffer_create(heap, dev, len, align, flags);
431                 if (!IS_ERR(buffer))
432                         break;
433         }
434         up_read(&dev->lock);
435
436         if (buffer == NULL)
437                 return ERR_PTR(-ENODEV);
438
439         if (IS_ERR(buffer))
440                 return ERR_CAST(buffer);
441
442         handle = ion_handle_create(client, buffer);
443
444         /*
445          * ion_buffer_create() returns a buffer with a refcount of 1 and
446          * ion_handle_create() takes a second reference, so drop one here.
447          */
448         ion_buffer_put(buffer);
449
450         if (IS_ERR(handle))
451                 return handle;
452
453         mutex_lock(&client->lock);
454         ret = ion_handle_add(client, handle);
455         mutex_unlock(&client->lock);
456         if (ret) {
457                 ion_handle_put(handle);
458                 handle = ERR_PTR(ret);
459         }
460
461         return handle;
462 }
463 EXPORT_SYMBOL(ion_alloc);
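
/*
 * Illustrative in-kernel usage sketch, not part of the driver: allocate a
 * buffer and release it again.  The device pointer, heap id, and sizes are
 * assumptions; valid heap ids depend on what the platform registered with
 * ion_device_add_heap().
 *
 *	static int example_alloc_free(struct ion_device *idev)
 *	{
 *		struct ion_client *client;
 *		struct ion_handle *handle;
 *
 *		client = ion_client_create(idev, "example");
 *		if (IS_ERR(client))
 *			return PTR_ERR(client);
 *
 *		handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *				   1 << example_heap_id, ION_FLAG_CACHED);
 *		if (IS_ERR(handle)) {
 *			ion_client_destroy(client);
 *			return PTR_ERR(handle);
 *		}
 *
 *		ion_free(client, handle);
 *		ion_client_destroy(client);
 *		return 0;
 *	}
 */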
464
465 void ion_free_nolock(struct ion_client *client,
466                      struct ion_handle *handle)
467 {
468         if (!ion_handle_validate(client, handle)) {
469                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
470                 return;
471         }
472         ion_handle_put_nolock(handle);
473 }
474
475 void ion_free(struct ion_client *client, struct ion_handle *handle)
476 {
477         BUG_ON(client != handle->client);
478
479         mutex_lock(&client->lock);
480         ion_free_nolock(client, handle);
481         mutex_unlock(&client->lock);
482 }
483 EXPORT_SYMBOL(ion_free);
484
485 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
486 {
487         void *vaddr;
488
489         if (buffer->kmap_cnt) {
490                 buffer->kmap_cnt++;
491                 return buffer->vaddr;
492         }
493         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
494         if (WARN_ONCE(vaddr == NULL,
495                       "heap->ops->map_kernel should return ERR_PTR on error"))
496                 return ERR_PTR(-EINVAL);
497         if (IS_ERR(vaddr))
498                 return vaddr;
499         buffer->vaddr = vaddr;
500         buffer->kmap_cnt++;
501         return vaddr;
502 }
503
504 static void *ion_handle_kmap_get(struct ion_handle *handle)
505 {
506         struct ion_buffer *buffer = handle->buffer;
507         void *vaddr;
508
509         if (handle->kmap_cnt) {
510                 handle->kmap_cnt++;
511                 return buffer->vaddr;
512         }
513         vaddr = ion_buffer_kmap_get(buffer);
514         if (IS_ERR(vaddr))
515                 return vaddr;
516         handle->kmap_cnt++;
517         return vaddr;
518 }
519
520 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
521 {
522         buffer->kmap_cnt--;
523         if (!buffer->kmap_cnt) {
524                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
525                 buffer->vaddr = NULL;
526         }
527 }
528
529 static void ion_handle_kmap_put(struct ion_handle *handle)
530 {
531         struct ion_buffer *buffer = handle->buffer;
532
533         if (!handle->kmap_cnt) {
534                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
535                 return;
536         }
537         handle->kmap_cnt--;
538         if (!handle->kmap_cnt)
539                 ion_buffer_kmap_put(buffer);
540 }
541
542 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
543 {
544         struct ion_buffer *buffer;
545         void *vaddr;
546
547         mutex_lock(&client->lock);
548         if (!ion_handle_validate(client, handle)) {
549                 pr_err("%s: invalid handle passed to map_kernel.\n",
550                        __func__);
551                 mutex_unlock(&client->lock);
552                 return ERR_PTR(-EINVAL);
553         }
554
555         buffer = handle->buffer;
556
557         if (!handle->buffer->heap->ops->map_kernel) {
558                 pr_err("%s: map_kernel is not implemented by this heap.\n",
559                        __func__);
560                 mutex_unlock(&client->lock);
561                 return ERR_PTR(-ENODEV);
562         }
563
564         mutex_lock(&buffer->lock);
565         vaddr = ion_handle_kmap_get(handle);
566         mutex_unlock(&buffer->lock);
567         mutex_unlock(&client->lock);
568         return vaddr;
569 }
570 EXPORT_SYMBOL(ion_map_kernel);
571
572 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
573 {
574         struct ion_buffer *buffer;
575
576         mutex_lock(&client->lock);
577         buffer = handle->buffer;
578         mutex_lock(&buffer->lock);
579         ion_handle_kmap_put(handle);
580         mutex_unlock(&buffer->lock);
581         mutex_unlock(&client->lock);
582 }
583 EXPORT_SYMBOL(ion_unmap_kernel);
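
/*
 * Illustrative sketch, continuing the ion_alloc() example above with the
 * same assumed client and handle: map the buffer into the kernel, touch it,
 * then drop the mapping.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, SZ_64K);
 *	ion_unmap_kernel(client, handle);
 */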
584
585 static struct mutex debugfs_mutex;
586 static struct rb_root *ion_root_client;
587 static int is_client_alive(struct ion_client *client)
588 {
589         struct rb_node *node;
590         struct ion_client *tmp;
591         struct ion_device *dev;
592
593         node = ion_root_client->rb_node;
594         dev = container_of(ion_root_client, struct ion_device, clients);
595
596         down_read(&dev->lock);
597         while (node) {
598                 tmp = rb_entry(node, struct ion_client, node);
599                 if (client < tmp) {
600                         node = node->rb_left;
601                 } else if (client > tmp) {
602                         node = node->rb_right;
603                 } else {
604                         up_read(&dev->lock);
605                         return 1;
606                 }
607         }
608
609         up_read(&dev->lock);
610         return 0;
611 }
612
613 static int ion_debug_client_show(struct seq_file *s, void *unused)
614 {
615         struct ion_client *client = s->private;
616         struct rb_node *n;
617         size_t sizes[ION_NUM_HEAP_IDS] = {0};
618         const char *names[ION_NUM_HEAP_IDS] = {NULL};
619         int i;
620
621         mutex_lock(&debugfs_mutex);
622         if (!is_client_alive(client)) {
623                 seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
624                            client);
625                 mutex_unlock(&debugfs_mutex);
626                 return 0;
627         }
628
629         mutex_lock(&client->lock);
630         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
631                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
632                                                      node);
633                 unsigned int id = handle->buffer->heap->id;
634
635                 if (!names[id])
636                         names[id] = handle->buffer->heap->name;
637                 sizes[id] += handle->buffer->size;
638         }
639         mutex_unlock(&client->lock);
640         mutex_unlock(&debugfs_mutex);
641
642         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
643         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
644                 if (!names[i])
645                         continue;
646                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
647         }
648         return 0;
649 }
650
651 static int ion_debug_client_open(struct inode *inode, struct file *file)
652 {
653         return single_open(file, ion_debug_client_show, inode->i_private);
654 }
655
656 static const struct file_operations debug_client_fops = {
657         .open = ion_debug_client_open,
658         .read = seq_read,
659         .llseek = seq_lseek,
660         .release = single_release,
661 };
662
663 static int ion_get_client_serial(const struct rb_root *root,
664                                  const unsigned char *name)
665 {
666         int serial = -1;
667         struct rb_node *node;
668
669         for (node = rb_first(root); node; node = rb_next(node)) {
670                 struct ion_client *client = rb_entry(node, struct ion_client,
671                                                      node);
672
673                 if (strcmp(client->name, name))
674                         continue;
675                 serial = max(serial, client->display_serial);
676         }
677         return serial + 1;
678 }
679
680 struct ion_client *ion_client_create(struct ion_device *dev,
681                                      const char *name)
682 {
683         struct ion_client *client;
684         struct task_struct *task;
685         struct rb_node **p;
686         struct rb_node *parent = NULL;
687         struct ion_client *entry;
688         pid_t pid;
689
690         if (!name) {
691                 pr_err("%s: Name cannot be null\n", __func__);
692                 return ERR_PTR(-EINVAL);
693         }
694
695         get_task_struct(current->group_leader);
696         task_lock(current->group_leader);
697         pid = task_pid_nr(current->group_leader);
698         /*
699          * Don't bother to store the task struct for kernel threads;
700          * they can't be killed anyway.
701          */
702         if (current->group_leader->flags & PF_KTHREAD) {
703                 put_task_struct(current->group_leader);
704                 task = NULL;
705         } else {
706                 task = current->group_leader;
707         }
708         task_unlock(current->group_leader);
709
710         client = kzalloc(sizeof(*client), GFP_KERNEL);
711         if (!client)
712                 goto err_put_task_struct;
713
714         client->dev = dev;
715         client->handles = RB_ROOT;
716         idr_init(&client->idr);
717         mutex_init(&client->lock);
718         client->task = task;
719         client->pid = pid;
720         client->name = kstrdup(name, GFP_KERNEL);
721         if (!client->name)
722                 goto err_free_client;
723
724         down_write(&dev->lock);
725         client->display_serial = ion_get_client_serial(&dev->clients, name);
726         client->display_name = kasprintf(
727                 GFP_KERNEL, "%s-%d", name, client->display_serial);
728         if (!client->display_name) {
729                 up_write(&dev->lock);
730                 goto err_free_client_name;
731         }
732         p = &dev->clients.rb_node;
733         while (*p) {
734                 parent = *p;
735                 entry = rb_entry(parent, struct ion_client, node);
736
737                 if (client < entry)
738                         p = &(*p)->rb_left;
739                 else if (client > entry)
740                         p = &(*p)->rb_right;
741         }
742         rb_link_node(&client->node, parent, p);
743         rb_insert_color(&client->node, &dev->clients);
744
745         client->debug_root = debugfs_create_file(client->display_name, 0664,
746                                                  dev->clients_debug_root,
747                                                  client, &debug_client_fops);
748         if (!client->debug_root) {
749                 char buf[256], *path;
750
751                 path = dentry_path(dev->clients_debug_root, buf, 256);
752                 pr_err("Failed to create client debugfs at %s/%s\n",
753                        path, client->display_name);
754         }
755
756         up_write(&dev->lock);
757
758         return client;
759
760 err_free_client_name:
761         kfree(client->name);
762 err_free_client:
763         kfree(client);
764 err_put_task_struct:
765         if (task)
766                 put_task_struct(current->group_leader);
767         return ERR_PTR(-ENOMEM);
768 }
769 EXPORT_SYMBOL(ion_client_create);
770
771 void ion_client_destroy(struct ion_client *client)
772 {
773         struct ion_device *dev = client->dev;
774         struct rb_node *n;
775
776         pr_debug("%s: %d\n", __func__, __LINE__);
777         mutex_lock(&debugfs_mutex);
778         while ((n = rb_first(&client->handles))) {
779                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
780                                                      node);
781                 ion_handle_destroy(&handle->ref);
782         }
783
784         idr_destroy(&client->idr);
785
786         down_write(&dev->lock);
787         if (client->task)
788                 put_task_struct(client->task);
789         rb_erase(&client->node, &dev->clients);
790         debugfs_remove_recursive(client->debug_root);
791         up_write(&dev->lock);
792
793         kfree(client->display_name);
794         kfree(client->name);
795         kfree(client);
796         mutex_unlock(&debugfs_mutex);
797 }
798 EXPORT_SYMBOL(ion_client_destroy);
799
800 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
801                                        struct device *dev,
802                                        enum dma_data_direction direction);
803
804 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
805                                         enum dma_data_direction direction)
806 {
807         struct dma_buf *dmabuf = attachment->dmabuf;
808         struct ion_buffer *buffer = dmabuf->priv;
809
810         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
811         return buffer->sg_table;
812 }
813
814 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
815                               struct sg_table *table,
816                               enum dma_data_direction direction)
817 {
818 }
819
820 void ion_pages_sync_for_device(struct device *dev, struct page *page,
821                                size_t size, enum dma_data_direction dir)
822 {
823         struct scatterlist sg;
824
825         sg_init_table(&sg, 1);
826         sg_set_page(&sg, page, size, 0);
827         /*
828          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
829          * for the targeted device, but this works on the currently targeted
830          * hardware.
831          */
832         sg_dma_address(&sg) = page_to_phys(page);
833         dma_sync_sg_for_device(dev, &sg, 1, dir);
834 }
835
836 struct ion_vma_list {
837         struct list_head list;
838         struct vm_area_struct *vma;
839 };
840
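/*
 * For buffers that fault in their user mappings, pages are tagged dirty as
 * they are faulted in (see ion_vm_fault()).  Before the buffer is mapped for
 * a device, every dirty page is synced for that device and all user mappings
 * are zapped, so the next CPU access faults again and re-marks its page
 * dirty.
 */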
841 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
842                                        struct device *dev,
843                                        enum dma_data_direction dir)
844 {
845         struct ion_vma_list *vma_list;
846         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
847         int i;
848
849         pr_debug("%s: syncing for device %s\n", __func__,
850                  dev ? dev_name(dev) : "null");
851
852         if (!ion_buffer_fault_user_mappings(buffer))
853                 return;
854
855         mutex_lock(&buffer->lock);
856         for (i = 0; i < pages; i++) {
857                 struct page *page = buffer->pages[i];
858
859                 if (ion_buffer_page_is_dirty(page))
860                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
861                                                   PAGE_SIZE, dir);
862
863                 ion_buffer_page_clean(buffer->pages + i);
864         }
865         list_for_each_entry(vma_list, &buffer->vmas, list) {
866                 struct vm_area_struct *vma = vma_list->vma;
867
868                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
869                                NULL);
870         }
871         mutex_unlock(&buffer->lock);
872 }
873
874 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
875 {
876         struct ion_buffer *buffer = vma->vm_private_data;
877         unsigned long pfn;
878         int ret;
879
880         mutex_lock(&buffer->lock);
881         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
882         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
883
884         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
885         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
886         mutex_unlock(&buffer->lock);
887         if (ret)
888                 return VM_FAULT_ERROR;
889
890         return VM_FAULT_NOPAGE;
891 }
892
893 static void ion_vm_open(struct vm_area_struct *vma)
894 {
895         struct ion_buffer *buffer = vma->vm_private_data;
896         struct ion_vma_list *vma_list;
897
898         vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
899         if (!vma_list)
900                 return;
901         vma_list->vma = vma;
902         mutex_lock(&buffer->lock);
903         list_add(&vma_list->list, &buffer->vmas);
904         mutex_unlock(&buffer->lock);
905         pr_debug("%s: adding %p\n", __func__, vma);
906 }
907
908 static void ion_vm_close(struct vm_area_struct *vma)
909 {
910         struct ion_buffer *buffer = vma->vm_private_data;
911         struct ion_vma_list *vma_list, *tmp;
912
913         pr_debug("%s\n", __func__);
914         mutex_lock(&buffer->lock);
915         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
916                 if (vma_list->vma != vma)
917                         continue;
918                 list_del(&vma_list->list);
919                 kfree(vma_list);
920                 pr_debug("%s: deleting %p\n", __func__, vma);
921                 break;
922         }
923         mutex_unlock(&buffer->lock);
924 }
925
926 static const struct vm_operations_struct ion_vma_ops = {
927         .open = ion_vm_open,
928         .close = ion_vm_close,
929         .fault = ion_vm_fault,
930 };
931
932 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
933 {
934         struct ion_buffer *buffer = dmabuf->priv;
935         int ret = 0;
936
937         if (!buffer->heap->ops->map_user) {
938                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
939                        __func__);
940                 return -EINVAL;
941         }
942
943         if (ion_buffer_fault_user_mappings(buffer)) {
944                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
945                                                         VM_DONTDUMP;
946                 vma->vm_private_data = buffer;
947                 vma->vm_ops = &ion_vma_ops;
948                 ion_vm_open(vma);
949                 return 0;
950         }
951
952         if (!(buffer->flags & ION_FLAG_CACHED))
953                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
954
955         mutex_lock(&buffer->lock);
956         /* now map it to userspace */
957         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
958         mutex_unlock(&buffer->lock);
959
960         if (ret)
961                 pr_err("%s: failure mapping buffer to userspace\n",
962                        __func__);
963
964         return ret;
965 }
966
967 static void ion_dma_buf_release(struct dma_buf *dmabuf)
968 {
969         struct ion_buffer *buffer = dmabuf->priv;
970
971         ion_buffer_put(buffer);
972 }
973
974 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
975 {
976         struct ion_buffer *buffer = dmabuf->priv;
977
978         return buffer->vaddr + offset * PAGE_SIZE;
979 }
980
981 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
982                                void *ptr)
983 {
984 }
985
986 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
987                                         enum dma_data_direction direction)
988 {
989         struct ion_buffer *buffer = dmabuf->priv;
990         void *vaddr;
991
992         if (!buffer->heap->ops->map_kernel) {
993                 pr_err("%s: map kernel is not implemented by this heap.\n",
994                        __func__);
995                 return -ENODEV;
996         }
997
998         mutex_lock(&buffer->lock);
999         vaddr = ion_buffer_kmap_get(buffer);
1000         mutex_unlock(&buffer->lock);
1001         return PTR_ERR_OR_ZERO(vaddr);
1002 }
1003
1004 static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1005                                       enum dma_data_direction direction)
1006 {
1007         struct ion_buffer *buffer = dmabuf->priv;
1008
1009         mutex_lock(&buffer->lock);
1010         ion_buffer_kmap_put(buffer);
1011         mutex_unlock(&buffer->lock);
1012
1013         return 0;
1014 }
1015
1016 static struct dma_buf_ops dma_buf_ops = {
1017         .map_dma_buf = ion_map_dma_buf,
1018         .unmap_dma_buf = ion_unmap_dma_buf,
1019         .mmap = ion_mmap,
1020         .release = ion_dma_buf_release,
1021         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1022         .end_cpu_access = ion_dma_buf_end_cpu_access,
1023         .kmap_atomic = ion_dma_buf_kmap,
1024         .kunmap_atomic = ion_dma_buf_kunmap,
1025         .kmap = ion_dma_buf_kmap,
1026         .kunmap = ion_dma_buf_kunmap,
1027 };
1028
1029 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1030                                   struct ion_handle *handle)
1031 {
1032         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1033         struct ion_buffer *buffer;
1034         struct dma_buf *dmabuf;
1035         bool valid_handle;
1036
1037         mutex_lock(&client->lock);
1038         valid_handle = ion_handle_validate(client, handle);
1039         if (!valid_handle) {
1040                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1041                 mutex_unlock(&client->lock);
1042                 return ERR_PTR(-EINVAL);
1043         }
1044         buffer = handle->buffer;
1045         ion_buffer_get(buffer);
1046         mutex_unlock(&client->lock);
1047
1048         exp_info.ops = &dma_buf_ops;
1049         exp_info.size = buffer->size;
1050         exp_info.flags = O_RDWR;
1051         exp_info.priv = buffer;
1052
1053         dmabuf = dma_buf_export(&exp_info);
1054         if (IS_ERR(dmabuf)) {
1055                 ion_buffer_put(buffer);
1056                 return dmabuf;
1057         }
1058
1059         return dmabuf;
1060 }
1061 EXPORT_SYMBOL(ion_share_dma_buf);
1062
1063 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1064 {
1065         struct dma_buf *dmabuf;
1066         int fd;
1067
1068         dmabuf = ion_share_dma_buf(client, handle);
1069         if (IS_ERR(dmabuf))
1070                 return PTR_ERR(dmabuf);
1071
1072         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1073         if (fd < 0)
1074                 dma_buf_put(dmabuf);
1075
1076         return fd;
1077 }
1078 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1079
1080 struct ion_handle *ion_import_dma_buf(struct ion_client *client,
1081                                       struct dma_buf *dmabuf)
1082 {
1083         struct ion_buffer *buffer;
1084         struct ion_handle *handle;
1085         int ret;
1086
1087         /* if this memory came from ion */
1088
1089         if (dmabuf->ops != &dma_buf_ops) {
1090                 pr_err("%s: can not import dmabuf from another exporter\n",
1091                        __func__);
1092                 return ERR_PTR(-EINVAL);
1093         }
1094         buffer = dmabuf->priv;
1095
1096         mutex_lock(&client->lock);
1097         /* if a handle exists for this buffer just take a reference to it */
1098         handle = ion_handle_lookup(client, buffer);
1099         if (!IS_ERR(handle)) {
1100                 ion_handle_get(handle);
1101                 mutex_unlock(&client->lock);
1102                 goto end;
1103         }
1104
1105         handle = ion_handle_create(client, buffer);
1106         if (IS_ERR(handle)) {
1107                 mutex_unlock(&client->lock);
1108                 goto end;
1109         }
1110
1111         ret = ion_handle_add(client, handle);
1112         mutex_unlock(&client->lock);
1113         if (ret) {
1114                 ion_handle_put(handle);
1115                 handle = ERR_PTR(ret);
1116         }
1117
1118 end:
1119         return handle;
1120 }
1121 EXPORT_SYMBOL(ion_import_dma_buf);
1122
1123 struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
1124 {
1125         struct dma_buf *dmabuf;
1126         struct ion_handle *handle;
1127
1128         dmabuf = dma_buf_get(fd);
1129         if (IS_ERR(dmabuf))
1130                 return ERR_CAST(dmabuf);
1131
1132         handle = ion_import_dma_buf(client, dmabuf);
1133         dma_buf_put(dmabuf);
1134         return handle;
1135 }
1136 EXPORT_SYMBOL(ion_import_dma_buf_fd);
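
/*
 * Illustrative sketch with assumed clients and handle: export a buffer as a
 * dma-buf file descriptor (e.g. to pass to userspace) and re-import an
 * ion-exported fd as a handle in another client.
 *
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *	struct ion_handle *imported;
 *
 *	if (fd < 0)
 *		return fd;
 *	imported = ion_import_dma_buf_fd(other_client, fd);
 *	if (IS_ERR(imported))
 *		return PTR_ERR(imported);
 */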
1137
1138 int ion_sync_for_device(struct ion_client *client, int fd)
1139 {
1140         struct dma_buf *dmabuf;
1141         struct ion_buffer *buffer;
1142
1143         dmabuf = dma_buf_get(fd);
1144         if (IS_ERR(dmabuf))
1145                 return PTR_ERR(dmabuf);
1146
1147         /* if this memory came from ion */
1148         if (dmabuf->ops != &dma_buf_ops) {
1149                 pr_err("%s: can not sync dmabuf from another exporter\n",
1150                        __func__);
1151                 dma_buf_put(dmabuf);
1152                 return -EINVAL;
1153         }
1154         buffer = dmabuf->priv;
1155
1156         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1157                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1158         dma_buf_put(dmabuf);
1159         return 0;
1160 }
1161
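/*
 * Heap query protocol: a first call with query->heaps == 0 only reports the
 * number of registered heaps in query->cnt; a second call with a user buffer
 * and cnt filled in receives up to cnt struct ion_heap_data records.
 */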
1162 int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
1163 {
1164         struct ion_device *dev = client->dev;
1165         struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
1166         int ret = -EINVAL, cnt = 0, max_cnt;
1167         struct ion_heap *heap;
1168         struct ion_heap_data hdata;
1169
1170         memset(&hdata, 0, sizeof(hdata));
1171
1172         down_read(&dev->lock);
1173         if (!buffer) {
1174                 query->cnt = dev->heap_cnt;
1175                 ret = 0;
1176                 goto out;
1177         }
1178
1179         if (query->cnt <= 0)
1180                 goto out;
1181
1182         max_cnt = query->cnt;
1183
1184         plist_for_each_entry(heap, &dev->heaps, node) {
1185                 strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
1186                 hdata.name[sizeof(hdata.name) - 1] = '\0';
1187                 hdata.type = heap->type;
1188                 hdata.heap_id = heap->id;
1189
1190                 if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
1191                         ret = -EFAULT;
1192                         goto out;
1193                 }
1194
1195                 cnt++;
1196                 if (cnt >= max_cnt)
1197                         break;
1198         }
1199
1200         query->cnt = cnt;
1201 out:
1202         up_read(&dev->lock);
1203         return ret;
1204 }
1205
1206 static int ion_release(struct inode *inode, struct file *file)
1207 {
1208         struct ion_client *client = file->private_data;
1209
1210         pr_debug("%s: %d\n", __func__, __LINE__);
1211         ion_client_destroy(client);
1212         return 0;
1213 }
1214
1215 static int ion_open(struct inode *inode, struct file *file)
1216 {
1217         struct miscdevice *miscdev = file->private_data;
1218         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1219         struct ion_client *client;
1220         char debug_name[64];
1221
1222         pr_debug("%s: %d\n", __func__, __LINE__);
1223         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1224         client = ion_client_create(dev, debug_name);
1225         if (IS_ERR(client))
1226                 return PTR_ERR(client);
1227         file->private_data = client;
1228
1229         return 0;
1230 }
1231
1232 static const struct file_operations ion_fops = {
1233         .owner          = THIS_MODULE,
1234         .open           = ion_open,
1235         .release        = ion_release,
1236         .unlocked_ioctl = ion_ioctl,
1237         .compat_ioctl   = compat_ion_ioctl,
1238 };
1239
1240 static size_t ion_debug_heap_total(struct ion_client *client,
1241                                    unsigned int id)
1242 {
1243         size_t size = 0;
1244         struct rb_node *n;
1245
1246         mutex_lock(&client->lock);
1247         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1248                 struct ion_handle *handle = rb_entry(n,
1249                                                      struct ion_handle,
1250                                                      node);
1251                 if (handle->buffer->heap->id == id)
1252                         size += handle->buffer->size;
1253         }
1254         mutex_unlock(&client->lock);
1255         return size;
1256 }
1257
1258 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1259 {
1260         struct ion_heap *heap = s->private;
1261         struct ion_device *dev = heap->dev;
1262         struct rb_node *n;
1263         size_t total_size = 0;
1264         size_t total_orphaned_size = 0;
1265
1266         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1267         seq_puts(s, "----------------------------------------------------\n");
1268
1269         mutex_lock(&debugfs_mutex);
1270         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1271                 struct ion_client *client = rb_entry(n, struct ion_client,
1272                                                      node);
1273                 size_t size = ion_debug_heap_total(client, heap->id);
1274
1275                 if (!size)
1276                         continue;
1277                 if (client->task) {
1278                         char task_comm[TASK_COMM_LEN];
1279
1280                         get_task_comm(task_comm, client->task);
1281                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1282                                    client->pid, size);
1283                 } else {
1284                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1285                                    client->pid, size);
1286                 }
1287         }
1288         mutex_unlock(&debugfs_mutex);
1289
1290         seq_puts(s, "----------------------------------------------------\n");
1291         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1292         mutex_lock(&dev->buffer_lock);
1293         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1294                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1295                                                      node);
1296                 if (buffer->heap->id != heap->id)
1297                         continue;
1298                 total_size += buffer->size;
1299                 if (!buffer->handle_count) {
1300                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1301                                    buffer->task_comm, buffer->pid,
1302                                    buffer->size, buffer->kmap_cnt,
1303                                    atomic_read(&buffer->ref.refcount));
1304                         total_orphaned_size += buffer->size;
1305                 }
1306         }
1307         mutex_unlock(&dev->buffer_lock);
1308         seq_puts(s, "----------------------------------------------------\n");
1309         seq_printf(s, "%16s %16zu\n", "total orphaned",
1310                    total_orphaned_size);
1311         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1312         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1313                 seq_printf(s, "%16s %16zu\n", "deferred free",
1314                            heap->free_list_size);
1315         seq_puts(s, "----------------------------------------------------\n");
1316
1317         if (heap->debug_show)
1318                 heap->debug_show(heap, s, unused);
1319
1320         return 0;
1321 }
1322
1323 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1324 {
1325         return single_open(file, ion_debug_heap_show, inode->i_private);
1326 }
1327
1328 static const struct file_operations debug_heap_fops = {
1329         .open = ion_debug_heap_open,
1330         .read = seq_read,
1331         .llseek = seq_lseek,
1332         .release = single_release,
1333 };
1334
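/*
 * Debugfs shrink control, exposed below as <heap>_shrink for heaps with a
 * shrinker: writing N scans up to N objects, writing 0 scans everything the
 * shrinker currently counts, and reading reports the current object count.
 */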
1335 static int debug_shrink_set(void *data, u64 val)
1336 {
1337         struct ion_heap *heap = data;
1338         struct shrink_control sc;
1339         int objs;
1340
1341         sc.gfp_mask = GFP_HIGHUSER;
1342         sc.nr_to_scan = val;
1343
1344         if (!val) {
1345                 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1346                 sc.nr_to_scan = objs;
1347         }
1348
1349         heap->shrinker.scan_objects(&heap->shrinker, &sc);
1350         return 0;
1351 }
1352
1353 static int debug_shrink_get(void *data, u64 *val)
1354 {
1355         struct ion_heap *heap = data;
1356         struct shrink_control sc;
1357         int objs;
1358
1359         sc.gfp_mask = GFP_HIGHUSER;
1360         sc.nr_to_scan = 0;
1361
1362         objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1363         *val = objs;
1364         return 0;
1365 }
1366
1367 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1368                         debug_shrink_set, "%llu\n");
1369
1370 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1371 {
1372         struct dentry *debug_file;
1373
1374         if (!heap->ops->allocate || !heap->ops->free)
1375                 pr_err("%s: can not add heap with invalid ops struct.\n",
1376                        __func__);
1377
1378         spin_lock_init(&heap->free_lock);
1379         heap->free_list_size = 0;
1380
1381         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1382                 ion_heap_init_deferred_free(heap);
1383
1384         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1385                 ion_heap_init_shrinker(heap);
1386
1387         heap->dev = dev;
1388         down_write(&dev->lock);
1389         /*
1390          * use negative heap->id to reverse the priority -- when traversing
1391          * the list later attempt higher id numbers first
1392          */
1393         plist_node_init(&heap->node, -heap->id);
1394         plist_add(&heap->node, &dev->heaps);
1395         debug_file = debugfs_create_file(heap->name, 0664,
1396                                          dev->heaps_debug_root, heap,
1397                                          &debug_heap_fops);
1398
1399         if (!debug_file) {
1400                 char buf[256], *path;
1401
1402                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1403                 pr_err("Failed to create heap debugfs at %s/%s\n",
1404                        path, heap->name);
1405         }
1406
1407         if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1408                 char debug_name[64];
1409
1410                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1411                 debug_file = debugfs_create_file(
1412                         debug_name, 0644, dev->heaps_debug_root, heap,
1413                         &debug_shrink_fops);
1414                 if (!debug_file) {
1415                         char buf[256], *path;
1416
1417                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1418                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1419                                path, debug_name);
1420                 }
1421         }
1422
1423         dev->heap_cnt++;
1424         up_write(&dev->lock);
1425 }
1426 EXPORT_SYMBOL(ion_device_add_heap);
1427
1428 struct ion_device *ion_device_create(long (*custom_ioctl)
1429                                      (struct ion_client *client,
1430                                       unsigned int cmd,
1431                                       unsigned long arg))
1432 {
1433         struct ion_device *idev;
1434         int ret;
1435
1436         idev = kzalloc(sizeof(*idev), GFP_KERNEL);
1437         if (!idev)
1438                 return ERR_PTR(-ENOMEM);
1439
1440         idev->dev.minor = MISC_DYNAMIC_MINOR;
1441         idev->dev.name = "ion";
1442         idev->dev.fops = &ion_fops;
1443         idev->dev.parent = NULL;
1444         ret = misc_register(&idev->dev);
1445         if (ret) {
1446                 pr_err("ion: failed to register misc device.\n");
1447                 kfree(idev);
1448                 return ERR_PTR(ret);
1449         }
1450
1451         idev->debug_root = debugfs_create_dir("ion", NULL);
1452         if (!idev->debug_root) {
1453                 pr_err("ion: failed to create debugfs root directory.\n");
1454                 goto debugfs_done;
1455         }
1456         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1457         if (!idev->heaps_debug_root) {
1458                 pr_err("ion: failed to create debugfs heaps directory.\n");
1459                 goto debugfs_done;
1460         }
1461         idev->clients_debug_root = debugfs_create_dir("clients",
1462                                                 idev->debug_root);
1463         if (!idev->clients_debug_root)
1464                 pr_err("ion: failed to create debugfs clients directory.\n");
1465
1466 debugfs_done:
1467
1468         idev->custom_ioctl = custom_ioctl;
1469         idev->buffers = RB_ROOT;
1470         mutex_init(&idev->buffer_lock);
1471         init_rwsem(&idev->lock);
1472         plist_head_init(&idev->heaps);
1473         idev->clients = RB_ROOT;
1474         ion_root_client = &idev->clients;
1475         mutex_init(&debugfs_mutex);
1476         return idev;
1477 }
1478 EXPORT_SYMBOL(ion_device_create);
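
/*
 * Illustrative platform wiring sketch: create the ion device and register a
 * heap with it.  ion_heap_create() and the platform data layout are assumed
 * here; they live outside this file.
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	struct ion_heap *heap;
 *
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	heap = ion_heap_create(&pdata->heaps[0]);
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 *	ion_device_add_heap(idev, heap);
 */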
1479
1480 void ion_device_destroy(struct ion_device *dev)
1481 {
1482         misc_deregister(&dev->dev);
1483         debugfs_remove_recursive(dev->debug_root);
1484         /* XXX need to free the heaps and clients ? */
1485         kfree(dev);
1486 }
1487 EXPORT_SYMBOL(ion_device_destroy);