i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
- struct drm_i915_gem_object *obj = vma->obj;
if (!drm_mm_node_allocated(&vma->node))
return;
entry = vma->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
- i915_gem_object_unpin_fence(obj);
+ i915_vma_unpin_fence(vma);
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
__i915_vma_unpin(vma);
if (ret)
return ERR_PTR(ret);
} else {
- ret = i915_gem_object_put_fence(obj);
+ ret = i915_vma_put_fence(vma);
if (ret) {
i915_vma_unpin(vma);
return ERR_PTR(ret);
}

offset += page << PAGE_SHIFT;
- vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
+ vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
entry->flags |= __EXEC_OBJECT_HAS_PIN;
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- ret = i915_gem_object_get_fence(obj);
+ ret = i915_vma_get_fence(vma);
if (ret)
return ret;
- if (i915_gem_object_pin_fence(obj))
+ if (i915_vma_pin_fence(vma))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
}
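/*
 * Note the pairing with the unreserve path at the top of this patch:
 * __EXEC_OBJECT_HAS_PIN / __EXEC_OBJECT_HAS_FENCE are set only once the pin
 * or fence is actually held, so teardown releases exactly what was acquired.
 * A minimal userspace sketch of that bookkeeping (illustrative names, not
 * i915 API):
 */
#include <stdbool.h>

#define HAS_PIN   (1u << 0)
#define HAS_FENCE (1u << 1)

struct entry { unsigned int flags; };

static bool pin_ok(void)   { return true; }	/* stand-ins for the real ops */
static bool fence_ok(void) { return true; }

static int reserve(struct entry *e, bool needs_fence)
{
	if (!pin_ok())
		return -1;
	e->flags |= HAS_PIN;

	if (needs_fence && fence_ok())
		e->flags |= HAS_FENCE;	/* set only when the fence is held */
	return 0;
}

static void unreserve(struct entry *e)
{
	if (e->flags & HAS_FENCE)
		e->flags &= ~HAS_FENCE;	/* i915_vma_unpin_fence() goes here */
	if (e->flags & HAS_PIN)
		e->flags &= ~HAS_PIN;	/* __i915_vma_unpin() goes here */
}

int main(void)
{
	struct entry e = { 0 };
	if (reserve(&e, true) == 0)
		unreserve(&e);
	return 0;
}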
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *engine, const u32 ctx_id)
{
- struct i915_gem_context *ctx = NULL;
+ struct i915_gem_context *ctx;
struct i915_ctx_hang_stats *hs;
- if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
- return ERR_PTR(-EINVAL);
-
ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
- if (flags & EXEC_OBJECT_NEEDS_FENCE) {
- i915_gem_active_set(&obj->last_fence, req);
- if (flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = req->i915;
-
- list_move_tail(&dev_priv->fence_regs[obj->fence_reg].link,
- &dev_priv->mm.fence_list);
- }
- }
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ i915_gem_active_set(&vma->last_fence, req);
i915_vma_set_active(vma, idx);
i915_gem_active_set(&vma->last_read[idx], req);
params->args_batch_start_offset;
if (exec_len == 0)
- exec_len = params->batch->size;
+ exec_len = params->batch->size - params->args_batch_start_offset;
ret = params->engine->emit_bb_start(params->request,
exec_start, exec_len,
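/*
 * Why the exec_len default changes: exec_start already includes
 * args_batch_start_offset, so defaulting exec_len to the full batch size
 * would run past the end of the buffer by exactly that offset.
 * Minimal sketch with illustrative numbers (not i915 code):
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t batch_size = 4096, start_offset = 256;
	uint64_t exec_len = 0;		/* userspace asked for "whole batch" */

	if (exec_len == 0)
		exec_len = batch_size - start_offset;

	/* start_offset + exec_len must stay within the batch object */
	assert(start_offset + exec_len <= batch_size);
	return 0;
}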
struct drm_i915_file_private *file_priv = file->driver_priv;
/* Check whether the file_priv has already selected one ring. */
- if ((int)file_priv->bsd_engine < 0) {
- /* If not, use the ping-pong mechanism to select one. */
- mutex_lock(&dev_priv->drm.struct_mutex);
- file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
- dev_priv->mm.bsd_engine_dispatch_index ^= 1;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
+ if ((int)file_priv->bsd_engine < 0)
+ file_priv->bsd_engine = atomic_fetch_xor(1,
+ &dev_priv->mm.bsd_engine_dispatch_index);
return file_priv->bsd_engine;
}
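/*
 * The struct_mutex-protected ping-pong above becomes a single atomic op:
 * atomic_fetch_xor() returns the previous index and flips it between 0 and 1
 * in one step, so concurrent callers alternate engines without taking a lock.
 * Userspace sketch of the same idea with C11 atomics (the kernel helper takes
 * its arguments as (value, pointer); this is not i915 code):
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dispatch_index;	/* toggles between 0 and 1 */

static unsigned int pick_bsd_engine(void)
{
	/* returns the old value and leaves the index flipped for the next caller */
	return atomic_fetch_xor(&dispatch_index, 1);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("engine %u\n", pick_bsd_engine());	/* 0, 1, 0, 1 */
	return 0;
}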
ret = -EINVAL;
goto err;
}
+ if (args->batch_start_offset > params->batch->size ||
+ args->batch_len > params->batch->size - args->batch_start_offset) {
+ DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+ ret = -EINVAL;
+ goto err;
+ }
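/*
 * The added check is written as two comparisons against the batch size rather
 * than "start + len > size", so a huge user-supplied start or len cannot wrap
 * on addition and slip past the test. Sketch with illustrative values (not
 * i915 code):
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool batch_in_bounds(uint64_t size, uint64_t start, uint64_t len)
{
	return start <= size && len <= size - start;
}

int main(void)
{
	assert(batch_in_bounds(4096, 256, 1024));	/* fits */
	assert(!batch_in_bounds(4096, 8192, 16));	/* start past the end */
	/* naive "start + len > size" would wrap to 15 and wrongly accept this */
	assert(!batch_in_bounds(4096, 16, UINT64_MAX));
	return 0;
}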
params->args_batch_start_offset = args->batch_start_offset;
if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {