drm/i915: Prepare object synchronisation for asynchronicity
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 89a5f8d..4b5364d 100644
@@ -2818,97 +2818,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        return ret;
 }
 
-static int
-__i915_gem_object_sync(struct drm_i915_gem_request *to,
-                      struct drm_i915_gem_request *from)
-{
-       int ret;
-
-       if (to->engine == from->engine)
-               return 0;
-
-       if (!i915.semaphores) {
-               ret = i915_wait_request(from,
-                                       from->i915->mm.interruptible |
-                                       I915_WAIT_LOCKED,
-                                       NULL,
-                                       NO_WAITBOOST);
-               if (ret)
-                       return ret;
-       } else {
-               int idx = intel_engine_sync_index(from->engine, to->engine);
-               if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
-                       return 0;
-
-               trace_i915_gem_ring_sync_to(to, from);
-               ret = to->engine->semaphore.sync_to(to, from);
-               if (ret)
-                       return ret;
-
-               from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
-       }
-
-       return 0;
-}
-
-/**
- * i915_gem_object_sync - sync an object to a ring.
- *
- * @obj: object which may be in use on another ring.
- * @to: request we are wishing to use
- *
- * This code is meant to abstract object synchronization with the GPU.
- * Conceptually we serialise writes between engines inside the GPU.
- * We only allow one engine to write into a buffer at any time, but
- * multiple readers. To ensure each has a coherent view of memory, we must:
- *
- * - If there is an outstanding write request to the object, the new
- *   request must wait for it to complete (either CPU or in hw, requests
- *   on the same ring will be naturally ordered).
- *
- * - If we are a write request (pending_write_domain is set), the new
- *   request must wait for outstanding read requests to complete.
- *
- * Returns 0 if successful, else propagates up the lower layer error.
- */
-int
-i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                    struct drm_i915_gem_request *to)
-{
-       struct i915_gem_active *active;
-       unsigned long active_mask;
-       int idx;
-
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-       active_mask = i915_gem_object_get_active(obj);
-       if (!active_mask)
-               return 0;
-
-       if (obj->base.pending_write_domain) {
-               active = obj->last_read;
-       } else {
-               active_mask = 1;
-               active = &obj->last_write;
-       }
-
-       for_each_active(active_mask, idx) {
-               struct drm_i915_gem_request *request;
-               int ret;
-
-               request = i915_gem_active_peek(&active[idx],
-                                              &obj->base.dev->struct_mutex);
-               if (!request)
-                       continue;
-
-               ret = __i915_gem_object_sync(to, request);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
 static void __i915_vma_iounmap(struct i915_vma *vma)
 {
        GEM_BUG_ON(i915_vma_is_pinned(vma));
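
For reference, the rule the removed kernel-doc describes (a buffer has at most one writer at a time, but any number of readers) boils down to a small dependency calculation. The sketch below restates it with simplified, hypothetical stand-in types; sketch_request, sketch_object and SKETCH_NUM_ENGINES are inventions for illustration, not driver API:

/*
 * Illustrative sketch only, not driver code: hypothetical types
 * standing in for the real request/activity tracking.
 */
#include <stdbool.h>
#include <stddef.h>

#define SKETCH_NUM_ENGINES 5	/* assumed engine count, for illustration */

struct sketch_request {
	int engine;	/* engine the request executes on */
};

struct sketch_object {
	/* pending writer, or NULL */
	struct sketch_request *last_write;
	/* pending reader per engine, or NULL */
	struct sketch_request *last_read[SKETCH_NUM_ENGINES];
};

/* Requests on the same engine are ordered by the ring itself. */
static bool sketch_must_wait(const struct sketch_request *to,
			     const struct sketch_request *from)
{
	return from && to->engine != from->engine;
}

/*
 * Collect the requests that @to must synchronise against before it may
 * touch @obj: a writer must order against every outstanding reader,
 * while a reader only orders against the last writer.  Returns the
 * number of dependencies stored in @deps (at most SKETCH_NUM_ENGINES).
 */
static size_t sketch_object_sync(const struct sketch_object *obj,
				 const struct sketch_request *to,
				 bool write,
				 const struct sketch_request *deps[])
{
	size_t n = 0;
	int i;

	if (write) {
		for (i = 0; i < SKETCH_NUM_ENGINES; i++)
			if (sketch_must_wait(to, obj->last_read[i]))
				deps[n++] = obj->last_read[i];
	} else {
		if (sketch_must_wait(to, obj->last_write))
			deps[n++] = obj->last_write;
	}

	return n;
}

Each dependency gathered this way was then resolved as in the removed __i915_gem_object_sync(): by a blocking CPU wait (i915_wait_request) when semaphores are disabled, or by queueing a GPU semaphore wait and caching the synchronised seqno otherwise.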