drm/exynos: remove function convert_to_vm_err_msg
[cascardo/linux.git] / drivers / gpu / drm / exynos / exynos_drm_gem.c
1 /* exynos_drm_gem.c
2  *
3  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4  * Author: Inki Dae <inki.dae@samsung.com>
5  *
6  * This program is free software; you can redistribute  it and/or modify it
7  * under  the terms of  the GNU General  Public License as published by the
8  * Free Software Foundation;  either version 2 of the  License, or (at your
9  * option) any later version.
10  */
11
12 #include <drm/drmP.h>
13 #include <drm/drm_vma_manager.h>
14
15 #include <linux/shmem_fs.h>
16 #include <drm/exynos_drm.h>
17
18 #include "exynos_drm_drv.h"
19 #include "exynos_drm_gem.h"
20 #include "exynos_drm_buf.h"
21 #include "exynos_drm_iommu.h"
22
23 static int check_gem_flags(unsigned int flags)
24 {
25         if (flags & ~(EXYNOS_BO_MASK)) {
26                 DRM_ERROR("invalid flags.\n");
27                 return -EINVAL;
28         }
29
30         return 0;
31 }
32
33 static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
34                                         struct vm_area_struct *vma)
35 {
36         DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
37
38         /* non-cachable as default. */
39         if (obj->flags & EXYNOS_BO_CACHABLE)
40                 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41         else if (obj->flags & EXYNOS_BO_WC)
42                 vma->vm_page_prot =
43                         pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
44         else
45                 vma->vm_page_prot =
46                         pgprot_noncached(vm_get_page_prot(vma->vm_flags));
47 }
48
49 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
50 {
51         /* TODO */
52
53         return roundup(size, PAGE_SIZE);
54 }
55
56 static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
57                                         struct vm_area_struct *vma,
58                                         unsigned long f_vaddr,
59                                         pgoff_t page_offset)
60 {
61         struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
62         struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
63         unsigned long pfn;
64
65         if (page_offset >= (buf->size >> PAGE_SHIFT)) {
66                 DRM_ERROR("invalid page offset\n");
67                 return -EINVAL;
68         }
69
70         pfn = page_to_pfn(buf->pages[page_offset]);
71
72         return vm_insert_mixed(vma, f_vaddr, pfn);
73 }
74
/*
 * Register @obj in @file_priv's handle table and report the new handle
 * through @handle.  On success the allocation-time reference is dropped,
 * leaving the handle as the object's owner.
 */
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret = drm_gem_handle_create(file_priv, obj, handle);

	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* the handle now holds the reference taken at allocation time */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
96
97 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
98 {
99         struct drm_gem_object *obj;
100         struct exynos_drm_gem_buf *buf;
101
102         obj = &exynos_gem_obj->base;
103         buf = exynos_gem_obj->buffer;
104
105         DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
106
107         /*
108          * do not release memory region from exporter.
109          *
110          * the region will be released by exporter
111          * once dmabuf's refcount becomes 0.
112          */
113         if (obj->import_attach)
114                 goto out;
115
116         exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
117
118 out:
119         exynos_drm_fini_buf(obj->dev, buf);
120         exynos_gem_obj->buffer = NULL;
121
122         drm_gem_free_mmap_offset(obj);
123
124         /* release file pointer to gem object. */
125         drm_gem_object_release(obj);
126
127         kfree(exynos_gem_obj);
128         exynos_gem_obj = NULL;
129 }
130
131 unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
132                                                 unsigned int gem_handle,
133                                                 struct drm_file *file_priv)
134 {
135         struct exynos_drm_gem_obj *exynos_gem_obj;
136         struct drm_gem_object *obj;
137
138         obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
139         if (!obj) {
140                 DRM_ERROR("failed to lookup gem object.\n");
141                 return 0;
142         }
143
144         exynos_gem_obj = to_exynos_gem_obj(obj);
145
146         drm_gem_object_unreference_unlocked(obj);
147
148         return exynos_gem_obj->buffer->size;
149 }
150
151
152 struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
153                                                       unsigned long size)
154 {
155         struct exynos_drm_gem_obj *exynos_gem_obj;
156         struct drm_gem_object *obj;
157         int ret;
158
159         exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
160         if (!exynos_gem_obj)
161                 return NULL;
162
163         exynos_gem_obj->size = size;
164         obj = &exynos_gem_obj->base;
165
166         ret = drm_gem_object_init(dev, obj, size);
167         if (ret < 0) {
168                 DRM_ERROR("failed to initialize gem object\n");
169                 kfree(exynos_gem_obj);
170                 return NULL;
171         }
172
173         DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
174
175         return exynos_gem_obj;
176 }
177
/*
 * exynos_drm_gem_create - allocate a GEM object with backing storage.
 * @dev: DRM device the object belongs to.
 * @flags: EXYNOS_BO_* memory-type and cache-attribute flags.
 * @size: requested size in bytes; rounded up to a page multiple.
 *
 * Returns the new object on success or an ERR_PTR on failure.
 */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	/* page-align the request before validating the flags */
	size = roundup_gem_size(size, flags);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0)
		goto err_gem_fini;

	return exynos_gem_obj;

	/* unwind in reverse order of construction */
err_gem_fini:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}
225
226 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
227                                 struct drm_file *file_priv)
228 {
229         struct drm_exynos_gem_create *args = data;
230         struct exynos_drm_gem_obj *exynos_gem_obj;
231         int ret;
232
233         exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
234         if (IS_ERR(exynos_gem_obj))
235                 return PTR_ERR(exynos_gem_obj);
236
237         ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
238                         &args->handle);
239         if (ret) {
240                 exynos_drm_gem_destroy(exynos_gem_obj);
241                 return ret;
242         }
243
244         return 0;
245 }
246
/*
 * exynos_drm_gem_get_dma_addr - look up a GEM object by handle and
 * return a pointer to its buffer's DMA address.
 *
 * NOTE: the reference taken by drm_gem_object_lookup() is deliberately
 * kept here so the buffer stays alive while the caller uses the
 * address; it must be released with exynos_drm_gem_put_dma_addr().
 */
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}
264
/*
 * exynos_drm_gem_put_dma_addr - drop the reference pinned by a previous
 * exynos_drm_gem_get_dma_addr() call on the same handle.
 */
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	/* drop the reference taken by the lookup just above */
	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
285
/*
 * exynos_drm_gem_mmap_buffer - map a GEM object's buffer into a
 * userspace VMA through the DMA mapping API.
 *
 * Returns 0 on success or a negative error code.
 */
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned long vm_size;
	int ret;

	/* the whole buffer is mapped from offset 0 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * a buffer contains information to physically continuous memory
	 * allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	/*
	 * NOTE(review): buffer->pages is passed as the cpu_addr cookie
	 * that dma_mmap_attrs() expects — presumably the cookie returned
	 * by dma_alloc_attrs(); confirm against exynos_drm_alloc_buf().
	 */
	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
				buffer->dma_addr, buffer->size,
				&buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}
319
320 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
321                                       struct drm_file *file_priv)
322 {       struct exynos_drm_gem_obj *exynos_gem_obj;
323         struct drm_exynos_gem_info *args = data;
324         struct drm_gem_object *obj;
325
326         mutex_lock(&dev->struct_mutex);
327
328         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
329         if (!obj) {
330                 DRM_ERROR("failed to lookup gem object.\n");
331                 mutex_unlock(&dev->struct_mutex);
332                 return -EINVAL;
333         }
334
335         exynos_gem_obj = to_exynos_gem_obj(obj);
336
337         args->flags = exynos_gem_obj->flags;
338         args->size = exynos_gem_obj->size;
339
340         drm_gem_object_unreference(obj);
341         mutex_unlock(&dev->struct_mutex);
342
343         return 0;
344 }
345
346 struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
347 {
348         struct vm_area_struct *vma_copy;
349
350         vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
351         if (!vma_copy)
352                 return NULL;
353
354         if (vma->vm_ops && vma->vm_ops->open)
355                 vma->vm_ops->open(vma);
356
357         if (vma->vm_file)
358                 get_file(vma->vm_file);
359
360         memcpy(vma_copy, vma, sizeof(*vma));
361
362         vma_copy->vm_mm = NULL;
363         vma_copy->vm_next = NULL;
364         vma_copy->vm_prev = NULL;
365
366         return vma_copy;
367 }
368
369 void exynos_gem_put_vma(struct vm_area_struct *vma)
370 {
371         if (!vma)
372                 return;
373
374         if (vma->vm_ops && vma->vm_ops->close)
375                 vma->vm_ops->close(vma);
376
377         if (vma->vm_file)
378                 fput(vma->vm_file);
379
380         kfree(vma);
381 }
382
383 int exynos_gem_get_pages_from_userptr(unsigned long start,
384                                                 unsigned int npages,
385                                                 struct page **pages,
386                                                 struct vm_area_struct *vma)
387 {
388         int get_npages;
389
390         /* the memory region mmaped with VM_PFNMAP. */
391         if (vma_is_io(vma)) {
392                 unsigned int i;
393
394                 for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
395                         unsigned long pfn;
396                         int ret = follow_pfn(vma, start, &pfn);
397                         if (ret)
398                                 return ret;
399
400                         pages[i] = pfn_to_page(pfn);
401                 }
402
403                 if (i != npages) {
404                         DRM_ERROR("failed to get user_pages.\n");
405                         return -EINVAL;
406                 }
407
408                 return 0;
409         }
410
411         get_npages = get_user_pages(current, current->mm, start,
412                                         npages, 1, 1, pages, NULL);
413         get_npages = max(get_npages, 0);
414         if (get_npages != npages) {
415                 DRM_ERROR("failed to get user_pages.\n");
416                 while (get_npages)
417                         put_page(pages[--get_npages]);
418                 return -EFAULT;
419         }
420
421         return 0;
422 }
423
/*
 * Release pages pinned by exynos_gem_get_pages_from_userptr(): mark
 * them dirty and drop the per-page references.  VM_PFNMAP regions took
 * no references, so there is nothing to undo for them.
 */
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	unsigned int i;

	if (vma_is_io(vma))
		return;

	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(pages[i]);

		/* undo the reference we took when populating the table */
		put_page(pages[i]);
	}
}
442
443 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
444                                 struct sg_table *sgt,
445                                 enum dma_data_direction dir)
446 {
447         int nents;
448
449         mutex_lock(&drm_dev->struct_mutex);
450
451         nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
452         if (!nents) {
453                 DRM_ERROR("failed to map sgl with dma.\n");
454                 mutex_unlock(&drm_dev->struct_mutex);
455                 return nents;
456         }
457
458         mutex_unlock(&drm_dev->struct_mutex);
459         return 0;
460 }
461
/*
 * exynos_gem_unmap_sgt_from_dma - undo exynos_gem_map_sgt_with_dma()
 * by releasing the DMA mapping of @sgt in direction @dir.
 */
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
468
469 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
470 {
471         struct exynos_drm_gem_obj *exynos_gem_obj;
472         struct exynos_drm_gem_buf *buf;
473
474         exynos_gem_obj = to_exynos_gem_obj(obj);
475         buf = exynos_gem_obj->buffer;
476
477         if (obj->import_attach)
478                 drm_prime_gem_destroy(obj, buf->sgt);
479
480         exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
481 }
482
483 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
484                                struct drm_device *dev,
485                                struct drm_mode_create_dumb *args)
486 {
487         struct exynos_drm_gem_obj *exynos_gem_obj;
488         int ret;
489
490         /*
491          * allocate memory to be used for framebuffer.
492          * - this callback would be called by user application
493          *      with DRM_IOCTL_MODE_CREATE_DUMB command.
494          */
495
496         args->pitch = args->width * ((args->bpp + 7) / 8);
497         args->size = args->pitch * args->height;
498
499         if (is_drm_iommu_supported(dev)) {
500                 exynos_gem_obj = exynos_drm_gem_create(dev,
501                         EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
502                         args->size);
503         } else {
504                 exynos_gem_obj = exynos_drm_gem_create(dev,
505                         EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
506                         args->size);
507         }
508
509         if (IS_ERR(exynos_gem_obj)) {
510                 dev_warn(dev->dev, "FB allocation failed.\n");
511                 return PTR_ERR(exynos_gem_obj);
512         }
513
514         ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
515                         &args->handle);
516         if (ret) {
517                 exynos_drm_gem_destroy(exynos_gem_obj);
518                 return ret;
519         }
520
521         return 0;
522 }
523
/*
 * exynos_drm_gem_dumb_map_offset - report the fake mmap offset for a
 * dumb buffer so userspace can mmap() it via the DRM device node.
 *
 * Returns 0 with the offset stored in @offset, or a negative error.
 */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	/* allocate (or reuse) the vma-manager node behind the offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	/* drop the lookup reference before releasing struct_mutex */
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
559
560 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
561 {
562         struct drm_gem_object *obj = vma->vm_private_data;
563         struct drm_device *dev = obj->dev;
564         unsigned long f_vaddr;
565         pgoff_t page_offset;
566         int ret;
567
568         page_offset = ((unsigned long)vmf->virtual_address -
569                         vma->vm_start) >> PAGE_SHIFT;
570         f_vaddr = (unsigned long)vmf->virtual_address;
571
572         mutex_lock(&dev->struct_mutex);
573
574         ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
575         if (ret < 0)
576                 DRM_ERROR("failed to map a buffer with user.\n");
577
578         mutex_unlock(&dev->struct_mutex);
579
580         switch (ret) {
581         case 0:
582         case -ERESTARTSYS:
583         case -EINTR:
584                 return VM_FAULT_NOPAGE;
585         case -ENOMEM:
586                 return VM_FAULT_OOM;
587         default:
588                 return VM_FAULT_SIGBUS;
589         }
590 }
591
592 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
593 {
594         struct exynos_drm_gem_obj *exynos_gem_obj;
595         struct drm_gem_object *obj;
596         int ret;
597
598         /* set vm_area_struct. */
599         ret = drm_gem_mmap(filp, vma);
600         if (ret < 0) {
601                 DRM_ERROR("failed to mmap.\n");
602                 return ret;
603         }
604
605         obj = vma->vm_private_data;
606         exynos_gem_obj = to_exynos_gem_obj(obj);
607
608         ret = check_gem_flags(exynos_gem_obj->flags);
609         if (ret)
610                 goto err_close_vm;
611
612         update_vm_cache_attr(exynos_gem_obj, vma);
613
614         ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
615         if (ret)
616                 goto err_close_vm;
617
618         return ret;
619
620 err_close_vm:
621         drm_gem_vm_close(vma);
622         drm_gem_free_mmap_offset(obj);
623
624         return ret;
625 }