/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>
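
/*
 * dma-buf interface for the i915 driver: exports GEM objects to other
 * devices/drivers as dma-bufs and wraps foreign dma-bufs in GEM objects.
 * This is the kernel side of DRM PRIME buffer sharing.
 */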
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}
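
/*
 * ->map_dma_buf: called when an attached importer needs access to the
 * backing storage.  We duplicate the object's sg_table so the importer gets
 * an independent mapping, then DMA-map that copy for the importer's device.
 * The backing pages stay pinned until unmap_dma_buf drops them.
 */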
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		st = ERR_PTR(ret);
		goto out;
	}

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		st = ERR_PTR(ret);
		goto out;
	}

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		sg_free_table(st);
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	i915_gem_object_pin_pages(obj);
out:
	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;
}
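
/*
 * ->unmap_dma_buf: undoes map_dma_buf for one attachment: DMA-unmap the
 * importer's copy of the sg_table, free it, and drop our page pin.
 */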
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}
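
/*
 * ->vmap: returns a refcounted kernel virtual mapping of the whole object.
 * The first caller builds a page array from the sg_table and vmap()s it;
 * later callers just bump vmapping_count.  The pages stay pinned for as
 * long as any mapping exists.
 */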
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto error;

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto error;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto error;

	obj->vmapping_count = 1;
	i915_gem_object_pin_pages(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

error:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}
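
/*
 * ->vunmap: drops one vmap reference; the kernel mapping and the page pin
 * are released when the last reference goes away.
 */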
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return;

	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}
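
/*
 * Page-level kmap and CPU mmap of the dma-buf are not implemented for
 * i915: the kmap hooks are stubs returning NULL and mmap reports -EINVAL.
 */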
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
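
/*
 * ->begin_cpu_access: moves the object into the CPU read domain (and the
 * CPU write domain when the importer intends to write), so CPU access
 * through the dma-buf sees coherent data.
 */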
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};
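
/*
 * Exporter entry point, reached from userspace via the DRM PRIME
 * handle-to-fd ioctl (drm_gem_prime_handle_to_fd() resolves the GEM handle
 * and calls this hook).  The GEM object itself becomes the dma-buf's
 * private data, which dma_buf_to_obj() relies on above.
 */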
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}
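
/*
 * get_pages/put_pages for imported objects: instead of shmem-backed pages,
 * the backing storage comes from the exporter via dma_buf_map_attachment(),
 * which already returns a DMA-mapped sg_table (hence has_dma_mapping).
 */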
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	obj->has_dma_mapping = true;
	return 0;
}
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
	obj->has_dma_mapping = false;
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};
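
/*
 * Importer entry point, reached via the DRM PRIME fd-to-handle ioctl.  A
 * dma-buf that we exported ourselves on the same device is short-circuited
 * to a plain GEM reference; anything else is wrapped in a new GEM object
 * that fetches its pages through the dma-buf attachment.
 */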
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
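
/*
 * Illustrative round trip as seen from userspace; a sketch only, not part
 * of this driver.  The fd variables and the second device are hypothetical:
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle, .flags = DRM_CLOEXEC,
 *	};
 *	ioctl(i915_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd now refers to the dma-buf exported above
 *	ioctl(other_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *	// the receiving driver's import hook wraps the buffer
 */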