/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

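/*
 * The host1x reference count is backed by the GEM object reference count,
 * which must be manipulated while holding struct_mutex.
 */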
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

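/*
 * Buffers are mapped to a fixed device address at allocation or import time
 * (through the IOMMU when available), so pinning only needs to report that
 * address; no sg_table is returned.
 */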
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

/*
 * A generic iommu_map_sg() function is being reviewed and will hopefully be
 * merged soon. At that point this function can be dropped in favour of the
 * one provided by the IOMMU API.
 */
static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents,
			      int prot)
{
	struct scatterlist *s;
	size_t offset = 0;
	unsigned int i;
	int err;

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		size_t length = s->offset + s->length;

		err = iommu_map(domain, iova + offset, phys, length, prot);
		if (err < 0) {
			iommu_unmap(domain, iova, offset);
			return err;
		}

		offset += length;
	}

	return offset;
}

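/*
 * Reserve a range of I/O virtual addresses from the drm_mm allocator and map
 * the buffer's scatter-gather list into it through the IOMMU.
 */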
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			     bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}

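/*
 * Allocate and initialize a GEM object of the given size (rounded up to the
 * next multiple of the page size) along with its mmap offset. Callers attach
 * the backing storage separately.
 */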
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
				      bo->paddr);
	}
}

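/*
 * Allocate shmem-backed pages for the object and describe them with an
 * sg_table so that caches can be flushed before device access.
 */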
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	struct sg_table *sgt;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_map_sg() can be used to flush the
	 * pages associated with it. Note that this relies on the fact that
	 * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
	 * only cache maintenance.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(sgt->sgl, s, sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
		sgt = ERR_PTR(-ENOMEM);
		goto release_sgt;
	}

	bo->sgt = sgt;

	return 0;

release_sgt:
	sg_free_table(sgt);
	kfree(sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(sgt);
}

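/*
 * When an IOMMU domain is available, back the buffer with discontiguous
 * pages and map them to a contiguous I/O virtual address range. Otherwise
 * fall back to a physically contiguous, write-combined DMA allocation.
 */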
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
						   GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

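/*
 * Create a buffer object along with a userspace handle for it. The handle
 * keeps the only reference, so the local reference is dropped on return.
 */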
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size, unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}

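/*
 * Import a dma-buf exported by another driver: attach to it, map it for DMA
 * and either map the resulting sg_table through the IOMMU or, without an
 * IOMMU, require the buffer to be contiguous.
 */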
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

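/*
 * Tear down a buffer object: undo the IOMMU mapping if one exists, then
 * return imported buffers to their exporter or free local backing storage.
 */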
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

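/*
 * Dumb buffers must honour the device's pitch alignment, so the minimum
 * pitch is rounded up before the allocation size is computed.
 */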
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}

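/*
 * Fault handler for shmem-backed buffers, which are populated one page at a
 * time. Contiguous buffers are mapped upfront in tegra_drm_mmap() and are
 * not expected to fault.
 */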
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		 PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

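/*
 * Contiguous buffers are mapped in one go using the DMA API, while
 * shmem-backed buffers get a write-combined mixed map that is populated
 * lazily by tegra_bo_fault().
 */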
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

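/*
 * Build an sg_table for a PRIME importer: one entry per page for
 * shmem-backed buffers, or a single entry covering the whole contiguous
 * buffer otherwise.
 */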
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

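/* kmap and mmap of exported buffers are not currently supported. */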
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page, void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags, NULL);
}

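/*
 * If the dma-buf was exported by this driver for the same device, take a
 * reference to the existing GEM object instead of importing a copy.
 */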
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}