drm/i915: Use atomic for dev_priv->mm.bsd_engine_dispatch_index
[cascardo/linux.git] / drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 58cebaf..9432d4c 100644 (file)
@@ -250,7 +250,6 @@ static void
 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry;
-       struct drm_i915_gem_object *obj = vma->obj;
 
        if (!drm_mm_node_allocated(&vma->node))
                return;
@@ -258,7 +257,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
        entry = vma->exec_entry;
 
        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-               i915_gem_object_unpin_fence(obj);
+               i915_vma_unpin_fence(vma);
 
        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
                __i915_vma_unpin(vma);
@@ -455,7 +454,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                        if (ret)
                                return ERR_PTR(ret);
                } else {
-                       ret = i915_gem_object_put_fence(obj);
+                       ret = i915_vma_put_fence(vma);
                        if (ret) {
                                i915_vma_unpin(vma);
                                return ERR_PTR(ret);
@@ -475,7 +474,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                offset += page << PAGE_SHIFT;
        }
 
-       vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
+       vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
        cache->page = page;
        cache->vaddr = (unsigned long)vaddr;
 
@@ -811,11 +810,11 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-               ret = i915_gem_object_get_fence(obj);
+               ret = i915_vma_get_fence(vma);
                if (ret)
                        return ret;
 
-               if (i915_gem_object_pin_fence(obj))
+               if (i915_vma_pin_fence(vma))
                        entry->flags |= __EXEC_OBJECT_HAS_FENCE;
        }
 
@@ -1254,12 +1253,9 @@ static struct i915_gem_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
                          struct intel_engine_cs *engine, const u32 ctx_id)
 {
-       struct i915_gem_context *ctx = NULL;
+       struct i915_gem_context *ctx;
        struct i915_ctx_hang_stats *hs;
 
-       if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
-               return ERR_PTR(-EINVAL);
-
        ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
        if (IS_ERR(ctx))
                return ctx;
@@ -1305,15 +1301,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
                obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
        }
 
-       if (flags & EXEC_OBJECT_NEEDS_FENCE) {
-               i915_gem_active_set(&obj->last_fence, req);
-               if (flags & __EXEC_OBJECT_HAS_FENCE) {
-                       struct drm_i915_private *dev_priv = req->i915;
-
-                       list_move_tail(&dev_priv->fence_regs[obj->fence_reg].link,
-                                      &dev_priv->mm.fence_list);
-               }
-       }
+       if (flags & EXEC_OBJECT_NEEDS_FENCE)
+               i915_gem_active_set(&vma->last_fence, req);
 
        i915_vma_set_active(vma, idx);
        i915_gem_active_set(&vma->last_read[idx], req);
@@ -1520,7 +1509,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
                     params->args_batch_start_offset;
 
        if (exec_len == 0)
-               exec_len = params->batch->size;
+               exec_len = params->batch->size - params->args_batch_start_offset;
 
        ret = params->engine->emit_bb_start(params->request,
                                            exec_start, exec_len,
@@ -1546,13 +1535,9 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
        /* Check whether the file_priv has already selected one ring. */
-       if ((int)file_priv->bsd_engine < 0) {
-               /* If not, use the ping-pong mechanism to select one. */
-               mutex_lock(&dev_priv->drm.struct_mutex);
-               file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
-               dev_priv->mm.bsd_engine_dispatch_index ^= 1;
-               mutex_unlock(&dev_priv->drm.struct_mutex);
-       }
+       if ((int)file_priv->bsd_engine < 0)
+               file_priv->bsd_engine = atomic_fetch_xor(1,
+                        &dev_priv->mm.bsd_engine_dispatch_index);
 
        return file_priv->bsd_engine;
 }
@@ -1746,6 +1731,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                ret = -EINVAL;
                goto err;
        }
+       if (args->batch_start_offset > params->batch->size ||
+           args->batch_len > params->batch->size - args->batch_start_offset) {
+               DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+               ret = -EINVAL;
+               goto err;
+       }
 
        params->args_batch_start_offset = args->batch_start_offset;
        if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {