ret = ring->add_request(ring);
if (ret)
return ret;
+
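+ /* the request's commands are now in the ring; record where they end */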
+ request->tail = intel_ring_get_tail(ringbuf);
}
request->head = request_start;
- request->tail = intel_ring_get_tail(ringbuf);
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
trace_i915_vma_unbind(vma);
- vma->unbind_vma(vma);
+ vma->vm->unbind_vma(vma);
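+ /* the VMA is no longer bound in any address space */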
+ vma->bound = 0;
list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm)) {
}
/**
- * Finds free space in the GTT aperture and binds the object there.
+ * Finds free space in the GTT aperture and binds the object or a view of it
+ * there.
*/
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
- return ERR_PTR(-EINVAL);
+ if (i915_is_ggtt(vm)) {
+ u32 view_size;
+
+ if (WARN_ON(!ggtt_view))
+ return ERR_PTR(-EINVAL);
- fence_size = i915_gem_get_gtt_size(dev,
- obj->base.size,
- obj->tiling_mode);
- fence_alignment = i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode, true);
- unfenced_alignment =
- i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode, false);
+ view_size = i915_ggtt_view_size(obj, ggtt_view);
+
+ fence_size = i915_gem_get_gtt_size(dev,
+ view_size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ view_size,
+ obj->tiling_mode,
+ true);
+ unfenced_alignment = i915_gem_get_gtt_alignment(dev,
+ view_size,
+ obj->tiling_mode,
+ false);
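+ /* a mappable pin must reserve the full fenceable size */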
+ size = flags & PIN_MAPPABLE ? fence_size : view_size;
+ } else {
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode,
+ true);
+ unfenced_alignment =
+ i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode,
+ false);
+ size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
+ }
if (alignment == 0)
alignment = flags & PIN_MAPPABLE ? fence_alignment :
unfenced_alignment;
if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
- DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
+ DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
+ ggtt_view ? ggtt_view->type : 0,
+ alignment);
return ERR_PTR(-EINVAL);
}
- size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
-
- /* If the object is bigger than the entire aperture, reject it early
- * before evicting everything in a vain attempt to find space.
+ /* If binding the object/GGTT view requires more space than the entire
+ * aperture has, reject it early before evicting everything in a vain
+ * attempt to find space.
*/
- if (obj->base.size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
- obj->base.size,
+ if (size > end) {
+ DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+ ggtt_view ? ggtt_view->type : 0,
+ size,
flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return ERR_PTR(-E2BIG);
goto err_remove_node;
trace_i915_vma_bind(vma, flags);
- ret = i915_vma_bind(vma, obj->cache_level,
- flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
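+ /* pass the pin flags through; i915_vma_bind derives the bind flags itself */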
+ ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
goto err_finish_gtt;
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node)) {
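+ /* PIN_UPDATE rewrites the existing PTEs with the new cache level */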
ret = i915_vma_bind(vma, cache_level,
- vma->bound & GLOBAL_BIND);
+ PIN_UPDATE);
if (ret)
return ret;
}
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
- int ret;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
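+ /* the object's cache level can be inspected without holding struct_mutex */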
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
+ if (obj == NULL)
+ return -ENOENT;
switch (obj->cache_level) {
case I915_CACHE_LLC:
break;
}
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return 0;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- /* In true PPGTT, bind has possibly changed PDEs, which
- * means we must do a context switch before the GPU can
- * accurately read some of the VMAs.
- */
vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
- }
-
- if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
- ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+ } else {
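+ /* already bound: i915_vma_bind applies any bindings the flags still require */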
+ ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
return ret;
}
- if ((bound ^ vma->bound) & GLOBAL_BIND) {
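+ /* map_and_fenceable is tracked for the normal GGTT view only */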
+ if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
+ (bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
u32 fence_size, fence_alignment;
dev_priv->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
- }
- WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+ WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+ }
vma->pin_count++;
return 0;