Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 15db801..62b9ea1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
 #include <drm/drm_vma_manager.h>
 
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 #include "exynos_drm_iommu.h"
 
-static unsigned int convert_to_vm_err_msg(int msg)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
 {
-       unsigned int out_msg;
+       struct drm_device *dev = obj->base.dev;
+       enum dma_attr attr;
+       unsigned int nr_pages;
 
-       switch (msg) {
-       case 0:
-       case -ERESTARTSYS:
-       case -EINTR:
-               out_msg = VM_FAULT_NOPAGE;
-               break;
+       if (obj->dma_addr) {
+               DRM_DEBUG_KMS("already allocated.\n");
+               return 0;
+       }
 
-       case -ENOMEM:
-               out_msg = VM_FAULT_OOM;
-               break;
+       init_dma_attrs(&obj->dma_attrs);
 
-       default:
-               out_msg = VM_FAULT_SIGBUS;
-               break;
-       }
+       /*
+        * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
+        * region will be allocated; otherwise the allocation is only
+        * as physically contiguous as possible.
+        */
+       if (!(obj->flags & EXYNOS_BO_NONCONTIG))
+               dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);
 
-       return out_msg;
-}
+       /*
+        * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
+        * mapping; otherwise use a cachable mapping.
+        */
+       if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
+               attr = DMA_ATTR_WRITE_COMBINE;
+       else
+               attr = DMA_ATTR_NON_CONSISTENT;
 
-static int check_gem_flags(unsigned int flags)
-{
-       if (flags & ~(EXYNOS_BO_MASK)) {
-               DRM_ERROR("invalid flags.\n");
-               return -EINVAL;
-       }
+       dma_set_attr(attr, &obj->dma_attrs);
+       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
 
-       return 0;
-}
+       nr_pages = obj->size >> PAGE_SHIFT;
 
-static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
-                                       struct vm_area_struct *vma)
-{
-       DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+       if (!is_drm_iommu_supported(dev)) {
+               dma_addr_t start_addr;
+               unsigned int i = 0;
 
-       /* non-cachable as default. */
-       if (obj->flags & EXYNOS_BO_CACHABLE)
-               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-       else if (obj->flags & EXYNOS_BO_WC)
-               vma->vm_page_prot =
-                       pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-       else
-               vma->vm_page_prot =
-                       pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-}
+               obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+               if (!obj->pages) {
+                       DRM_ERROR("failed to allocate pages.\n");
+                       return -ENOMEM;
+               }
 
-static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
-{
-       /* TODO */
+               obj->cookie = dma_alloc_attrs(dev->dev,
+                                       obj->size,
+                                       &obj->dma_addr, GFP_KERNEL,
+                                       &obj->dma_attrs);
+               if (!obj->cookie) {
+                       DRM_ERROR("failed to allocate buffer.\n");
+                       drm_free_large(obj->pages);
+                       return -ENOMEM;
+               }
 
-       return roundup(size, PAGE_SIZE);
+               start_addr = obj->dma_addr;
+               while (i < nr_pages) {
+                       obj->pages[i] = phys_to_page(start_addr);
+                       start_addr += PAGE_SIZE;
+                       i++;
+               }
+       } else {
+               obj->pages = dma_alloc_attrs(dev->dev, obj->size,
+                                       &obj->dma_addr, GFP_KERNEL,
+                                       &obj->dma_attrs);
+               if (!obj->pages) {
+                       DRM_ERROR("failed to allocate buffer.\n");
+                       return -ENOMEM;
+               }
+       }
+
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+                       (unsigned long)obj->dma_addr,
+                       obj->size);
+
+       return 0;
 }
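
For reference, a minimal sketch of the dma_attrs pattern that the new exynos_drm_alloc_buf() is built on, assuming only a valid struct device; the helper name alloc_wc_contig() is hypothetical and not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/* Sketch: allocate a forcibly contiguous, writecombined buffer with no
 * kernel mapping, mirroring the attribute setup in the hunk above. */
static void *alloc_wc_contig(struct device *dev, size_t size,
			     dma_addr_t *dma_addr, struct dma_attrs *attrs)
{
	init_dma_attrs(attrs);
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	/* With DMA_ATTR_NO_KERNEL_MAPPING the return value is an opaque
	 * cookie to pass back to dma_free_attrs(), not a usable virtual
	 * address; the patch therefore stores it in obj->cookie. */
	return dma_alloc_attrs(dev, PAGE_ALIGN(size), dma_addr,
			       GFP_KERNEL, attrs);
}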
 
-static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
-                                       struct vm_area_struct *vma,
-                                       unsigned long f_vaddr,
-                                       pgoff_t page_offset)
+static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
 {
-       struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-       struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-       struct scatterlist *sgl;
-       unsigned long pfn;
-       int i;
-
-       if (!buf->sgt)
-               return -EINTR;
+       struct drm_device *dev = obj->base.dev;
 
-       if (page_offset >= (buf->size >> PAGE_SHIFT)) {
-               DRM_ERROR("invalid page offset\n");
-               return -EINVAL;
+       if (!obj->dma_addr) {
+               DRM_DEBUG_KMS("dma_addr is invalid.\n");
+               return;
        }
 
-       sgl = buf->sgt->sgl;
-       for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-               if (page_offset < (sgl->length >> PAGE_SHIFT))
-                       break;
-               page_offset -=  (sgl->length >> PAGE_SHIFT);
-       }
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+                       (unsigned long)obj->dma_addr, obj->size);
 
-       pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+       if (!is_drm_iommu_supported(dev)) {
+               dma_free_attrs(dev->dev, obj->size, obj->cookie,
+                               (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+               drm_free_large(obj->pages);
+       } else
+               dma_free_attrs(dev->dev, obj->size, obj->pages,
+                               (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
 
-       return vm_insert_mixed(vma, f_vaddr, pfn);
+       obj->dma_addr = (dma_addr_t)NULL;
 }
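
The matching teardown, sketched under the same assumptions; size and attrs must be exactly those used at allocation time:

/* Sketch: release a buffer obtained from alloc_wc_contig() above
 * (both helpers are hypothetical illustrations, not patch code). */
static void free_wc_contig(struct device *dev, size_t size, void *cookie,
			   dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	dma_free_attrs(dev, PAGE_ALIGN(size), cookie, dma_addr, attrs);
}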
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -131,11 +145,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
 
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 {
-       struct drm_gem_object *obj;
-       struct exynos_drm_gem_buf *buf;
-
-       obj = &exynos_gem_obj->base;
-       buf = exynos_gem_obj->buffer;
+       struct drm_gem_object *obj = &exynos_gem_obj->base;
 
        DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
 
@@ -148,12 +158,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
        if (obj->import_attach)
                goto out;
 
-       exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+       exynos_drm_free_buf(exynos_gem_obj);
 
 out:
-       exynos_drm_fini_buf(obj->dev, buf);
-       exynos_gem_obj->buffer = NULL;
-
        drm_gem_free_mmap_offset(obj);
 
        /* release file pointer to gem object. */
@@ -180,7 +187,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
 
        drm_gem_object_unreference_unlocked(obj);
 
-       return exynos_gem_obj->buffer->size;
+       return exynos_gem_obj->size;
 }
 
 
@@ -193,7 +200,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
 
        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;
@@ -202,7 +209,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
-               return NULL;
+               return ERR_PTR(ret);
        }
 
        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -215,47 +222,35 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned long size)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
-       struct exynos_drm_gem_buf *buf;
        int ret;
 
+       if (flags & ~(EXYNOS_BO_MASK)) {
+               DRM_ERROR("invalid flags.\n");
+               return ERR_PTR(-EINVAL);
+       }
+
        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }
 
-       size = roundup_gem_size(size, flags);
-
-       ret = check_gem_flags(flags);
-       if (ret)
-               return ERR_PTR(ret);
-
-       buf = exynos_drm_init_buf(dev, size);
-       if (!buf)
-               return ERR_PTR(-ENOMEM);
+       size = roundup(size, PAGE_SIZE);
 
        exynos_gem_obj = exynos_drm_gem_init(dev, size);
-       if (!exynos_gem_obj) {
-               ret = -ENOMEM;
-               goto err_fini_buf;
-       }
-
-       exynos_gem_obj->buffer = buf;
+       if (IS_ERR(exynos_gem_obj))
+               return exynos_gem_obj;
 
        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;
 
-       ret = exynos_drm_alloc_buf(dev, buf, flags);
-       if (ret < 0)
-               goto err_gem_fini;
+       ret = exynos_drm_alloc_buf(exynos_gem_obj);
+       if (ret < 0) {
+               drm_gem_object_release(&exynos_gem_obj->base);
+               kfree(exynos_gem_obj);
+               return ERR_PTR(ret);
+       }
 
        return exynos_gem_obj;
-
-err_gem_fini:
-       drm_gem_object_release(&exynos_gem_obj->base);
-       kfree(exynos_gem_obj);
-err_fini_buf:
-       exynos_drm_fini_buf(dev, buf);
-       return ERR_PTR(ret);
 }
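
Since exynos_drm_gem_init() and exynos_drm_gem_create() now return ERR_PTR-encoded errors instead of NULL, a caller-side sketch looks like this (dev and len are assumed to be in scope):

/* Sketch: create a contiguous, writecombined GEM object and
 * propagate the encoded error code on failure. */
struct exynos_drm_gem_obj *gem;

gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG | EXYNOS_BO_WC, len);
if (IS_ERR(gem))
	return PTR_ERR(gem);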
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -294,7 +289,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 
        exynos_gem_obj = to_exynos_gem_obj(obj);
 
-       return &exynos_gem_obj->buffer->dma_addr;
+       return &exynos_gem_obj->dma_addr;
 }
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -318,115 +313,30 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
        drm_gem_object_unreference_unlocked(obj);
 }
 
-int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
-                                   struct drm_file *file_priv)
-{
-       struct drm_exynos_gem_map_off *args = data;
-
-       DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
-                       args->handle, (unsigned long)args->offset);
-
-       if (!(dev->driver->driver_features & DRIVER_GEM)) {
-               DRM_ERROR("does not support GEM.\n");
-               return -ENODEV;
-       }
-
-       return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
-                       &args->offset);
-}
-
-int exynos_drm_gem_mmap_buffer(struct file *filp,
+int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
                                      struct vm_area_struct *vma)
 {
-       struct drm_gem_object *obj = filp->private_data;
-       struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-       struct drm_device *drm_dev = obj->dev;
-       struct exynos_drm_gem_buf *buffer;
+       struct drm_device *drm_dev = exynos_gem_obj->base.dev;
        unsigned long vm_size;
        int ret;
 
-       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-       vma->vm_private_data = obj;
-       vma->vm_ops = drm_dev->driver->gem_vm_ops;
-
-       update_vm_cache_attr(exynos_gem_obj, vma);
+       vma->vm_flags &= ~VM_PFNMAP;
+       vma->vm_pgoff = 0;
 
        vm_size = vma->vm_end - vma->vm_start;
 
-       /*
-        * a buffer contains information to physically continuous memory
-        * allocated by user request or at framebuffer creation.
-        */
-       buffer = exynos_gem_obj->buffer;
-
        /* check if user-requested size is valid. */
-       if (vm_size > buffer->size)
+       if (vm_size > exynos_gem_obj->size)
                return -EINVAL;
 
-       ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
-                               buffer->dma_addr, buffer->size,
-                               &buffer->dma_attrs);
+       ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
+                               exynos_gem_obj->dma_addr, exynos_gem_obj->size,
+                               &exynos_gem_obj->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }
 
-       /*
-        * take a reference to this mapping of the object. And this reference
-        * is unreferenced by the corresponding vm_close call.
-        */
-       drm_gem_object_reference(obj);
-
-       drm_vm_open_locked(drm_dev, vma);
-
-       return 0;
-}
-
-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv)
-{
-       struct drm_exynos_file_private *exynos_file_priv;
-       struct drm_exynos_gem_mmap *args = data;
-       struct drm_gem_object *obj;
-       struct file *anon_filp;
-       unsigned long addr;
-
-       if (!(dev->driver->driver_features & DRIVER_GEM)) {
-               DRM_ERROR("does not support GEM.\n");
-               return -ENODEV;
-       }
-
-       mutex_lock(&dev->struct_mutex);
-
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-       if (!obj) {
-               DRM_ERROR("failed to lookup gem object.\n");
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-
-       exynos_file_priv = file_priv->driver_priv;
-       anon_filp = exynos_file_priv->anon_filp;
-       anon_filp->private_data = obj;
-
-       addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
-                       MAP_SHARED, 0);
-
-       drm_gem_object_unreference(obj);
-
-       if (IS_ERR_VALUE(addr)) {
-               mutex_unlock(&dev->struct_mutex);
-               return (int)addr;
-       }
-
-       mutex_unlock(&dev->struct_mutex);
-
-       args->mapped = addr;
-
-       DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
-
        return 0;
 }
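
With DRM_EXYNOS_GEM_MMAP and the map-offset ioctl removed above, userspace reaches these buffers through the generic mmap-offset route instead. A hedged userspace sketch, assuming an open DRM fd and a valid GEM handle:

/* Userspace sketch: the generic replacement for the removed ioctls.
 * Needs <sys/ioctl.h>, <sys/mman.h>, <errno.h> and <drm/drm.h>. */
struct drm_mode_map_dumb map = { .handle = handle };
void *ptr;

if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
	return -errno;

ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
	   fd, map.offset);
if (ptr == MAP_FAILED)
	return -errno;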
 
@@ -581,15 +491,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-       struct exynos_drm_gem_obj *exynos_gem_obj;
-       struct exynos_drm_gem_buf *buf;
-
-       exynos_gem_obj = to_exynos_gem_obj(obj);
-       buf = exynos_gem_obj->buffer;
-
-       if (obj->import_attach)
-               drm_prime_gem_destroy(obj, buf->sgt);
-
        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
@@ -673,24 +574,34 @@ unlock:
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
-       struct drm_device *dev = obj->dev;
-       unsigned long f_vaddr;
+       struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+       unsigned long pfn;
        pgoff_t page_offset;
        int ret;
 
        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
-       f_vaddr = (unsigned long)vmf->virtual_address;
-
-       mutex_lock(&dev->struct_mutex);
 
-       ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
-       if (ret < 0)
-               DRM_ERROR("failed to map a buffer with user.\n");
+       if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
+               DRM_ERROR("invalid page offset\n");
+               ret = -EINVAL;
+               goto out;
+       }
 
-       mutex_unlock(&dev->struct_mutex);
+       pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
+       ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
-       return convert_to_vm_err_msg(ret);
+out:
+       switch (ret) {
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
+               return VM_FAULT_NOPAGE;
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       default:
+               return VM_FAULT_SIGBUS;
+       }
 }
 
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -709,17 +620,100 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);
 
-       ret = check_gem_flags(exynos_gem_obj->flags);
-       if (ret) {
-               drm_gem_vm_close(vma);
-               drm_gem_free_mmap_offset(obj);
-               return ret;
-       }
+       DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);
 
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP;
+       /* non-cachable by default. */
+       if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
+               vma->vm_page_prot =
+                       pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+       else
+               vma->vm_page_prot =
+                       pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 
-       update_vm_cache_attr(exynos_gem_obj, vma);
+       ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
+       if (ret)
+               goto err_close_vm;
 
        return ret;
+
+err_close_vm:
+       drm_gem_vm_close(vma);
+       drm_gem_free_mmap_offset(obj);
+
+       return ret;
+}
+
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+       struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+       int npages;
+
+       npages = exynos_gem_obj->size >> PAGE_SHIFT;
+
+       return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
+}
+
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+                                    struct dma_buf_attachment *attach,
+                                    struct sg_table *sgt)
+{
+       struct exynos_drm_gem_obj *exynos_gem_obj;
+       int npages;
+       int ret;
+
+       exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
+       if (IS_ERR(exynos_gem_obj)) {
+               ret = PTR_ERR(exynos_gem_obj);
+               return ERR_PTR(ret);
+       }
+
+       exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
+
+       npages = exynos_gem_obj->size >> PAGE_SHIFT;
+       exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       if (!exynos_gem_obj->pages) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
+                       npages);
+       if (ret < 0)
+               goto err_free_large;
+
+       if (sgt->nents == 1) {
+               /* always physically contiguous memory if sgt->nents is 1. */
+               exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+       } else {
+               /*
+                * this case could be CONTIG or NONCONTIG type but for now
+                * assume NONCONTIG.
+                * TODO: find a way for the exporter to notify the importer
+                * of its buffer type.
+                */
+               exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
+       }
+
+       return &exynos_gem_obj->base;
+
+err_free_large:
+       drm_free_large(exynos_gem_obj->pages);
+err:
+       drm_gem_object_release(&exynos_gem_obj->base);
+       kfree(exynos_gem_obj);
+       return ERR_PTR(ret);
+}
+
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+       return NULL;
+}
+
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+       /* Nothing to do */
 }
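
For context, these four helpers are wired into struct drm_driver together with the generic PRIME entry points; a minimal hookup sketch (the surrounding driver structure is elided):

static struct drm_driver exynos_drm_driver = {
	/* ... */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_get_sg_table	= exynos_drm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
	/* ... */
};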