diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a71cf98..42c074a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -146,10 +146,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));
-
-       if (bo->ttm)
-               ttm_tt_destroy(bo->ttm);
+       ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
+       fence_put(bo->moving);
        if (bo->resv == &bo->ttm_resv)
                reservation_object_fini(&bo->ttm_resv);
        mutex_destroy(&bo->wu_mutex);
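
The call sites can drop their NULL checks because the preceding cleanup made ttm_tt_destroy() NULL-tolerant; a minimal sketch of that contract (the real implementation lives in ttm_tt.c):

    /* kfree()-style contract the call sites now rely on: destroying a
     * NULL ttm is a no-op, and unbind is folded into the destructor.
     */
    void ttm_tt_destroy(struct ttm_tt *ttm)
    {
            if (ttm == NULL)
                    return;

            ttm_tt_unbind(ttm);
            /* ... unpopulate and free the page array, free ttm ... */
    }

The new fence_put(bo->moving) drops the reference to the last pipelined move once the BO is finally released; like kfree(), fence_put(NULL) is a safe no-op.
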
@@ -355,12 +354,14 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-               ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+               ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
+                                     mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_gpu, mem);
        else
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+               ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                        no_wait_gpu, mem);
 
        if (ret) {
                if (bdev->driver->move_notify) {
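
Threading `interruptible` into ttm_bo_move_ttm() and ttm_bo_move_memcpy() pairs with the removal of the up-front wait from ttm_bo_move_buffer() further down: each move helper now waits only when it actually has to. A hedged sketch of the resulting shape (the real bodies live in ttm_bo_util.c):

    int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
                        bool no_wait_gpu, struct ttm_mem_reg *new_mem)
    {
            int ret;

            /* The wait that used to sit in ttm_bo_move_buffer() moves
             * here, where the helper knows the copy cannot be pipelined.
             */
            ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
            if (unlikely(ret != 0))
                    return ret;

            /* ... unbind from the old placement, bind new_mem ... */
            return 0;
    }
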
@@ -396,8 +397,7 @@ moved:
 
 out_err:
        new_man = &bdev->man[bo->mem.mem_type];
-       if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
-               ttm_tt_unbind(bo->ttm);
+       if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }
@@ -418,11 +418,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
        if (bo->bdev->driver->move_notify)
                bo->bdev->driver->move_notify(bo, NULL);
 
-       if (bo->ttm) {
-               ttm_tt_unbind(bo->ttm);
-               ttm_tt_destroy(bo->ttm);
-               bo->ttm = NULL;
-       }
+       ttm_tt_destroy(bo->ttm);
+       bo->ttm = NULL;
        ttm_bo_mem_put(bo, &bo->mem);
 
        ww_mutex_unlock (&bo->resv->lock);
@@ -688,15 +685,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        struct ttm_placement placement;
        int ret = 0;
 
-       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS) {
-                       pr_err("Failed to expire sync object before buffer eviction\n");
-               }
-               goto out;
-       }
-
        lockdep_assert_held(&bo->resv->lock.base);
 
        evict_mem = bo->mem;
@@ -720,7 +708,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_gpu);
-       if (ret) {
+       if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
                ttm_bo_mem_put(bo, &evict_mem);
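
Dropping the pre-eviction ttm_bo_wait() is the point of the series: an eviction no longer stalls for the GPU, since the driver's move callback may queue the copy behind a fence instead of waiting for idle. A loose sketch of what such a pipelined move publishes, modeled on the accompanying ttm_bo_util.c changes (the function name is illustrative):

    static void publish_pipelined_move(struct ttm_buffer_object *bo,
                                       struct ttm_mem_type_manager *from_man,
                                       struct fence *fence)
    {
            /* Track the newest move out of the old memory domain ... */
            spin_lock(&from_man->move_lock);
            if (!from_man->move || fence_is_later(fence, from_man->move)) {
                    fence_put(from_man->move);
                    from_man->move = fence_get(fence);
            }
            spin_unlock(&from_man->move_lock);

            /* ... and on the BO itself, so readers know what to wait on. */
            fence_put(bo->moving);
            bo->moving = fence_get(fence);
    }
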
@@ -799,6 +787,34 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
 
+/**
+ * Add the last move fence to the BO and reserve a new shared slot.
+ */
+static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+                                struct ttm_mem_type_manager *man,
+                                struct ttm_mem_reg *mem)
+{
+       struct fence *fence;
+       int ret;
+
+       spin_lock(&man->move_lock);
+       fence = fence_get(man->move);
+       spin_unlock(&man->move_lock);
+
+       if (fence) {
+               reservation_object_add_shared_fence(bo->resv, fence);
+
+               ret = reservation_object_reserve_shared(bo->resv);
+               if (unlikely(ret))
+                       return ret;
+
+               fence_put(bo->moving);
+               bo->moving = fence;
+       }
+
+       return 0;
+}
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
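
Two details carry ttm_bo_add_move_fence(): fence_get(NULL) simply returns NULL, so the `if (fence)` test is the entire NULL handling, and reservation_object_add_shared_fence() consumes the shared slot reserved earlier in ttm_bo_mem_space(), which is why a replacement slot is reserved right afterwards. The slot discipline, compressed:

    /* One shared slot is reserved before any node is allocated ... */
    ret = reservation_object_reserve_shared(bo->resv);
    /* ... adding a fence consumes that slot ... */
    reservation_object_add_shared_fence(bo->resv, fence);
    /* ... so replenish it before the next user needs one. */
    ret = reservation_object_reserve_shared(bo->resv);
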
@@ -825,10 +841,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
-       if (mem->mm_node == NULL)
-               return -ENOMEM;
        mem->mem_type = mem_type;
-       return 0;
+       return ttm_bo_add_move_fence(bo, man, mem);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
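
The removed -ENOMEM check was dead code: the do-while loop above it (outside this hunk's context) can only fall through with mem->mm_node set, since every other path returns early. The loop reads roughly:

    do {
            ret = (*man->func->get_node)(man, bo, place, mem);
            if (unlikely(ret != 0))
                    return ret;
            if (mem->mm_node)
                    break;
            ret = ttm_mem_evict_first(bdev, mem_type, place,
                                      interruptible, no_wait_gpu);
            if (unlikely(ret != 0))
                    return ret;
    } while (1);

Returning ttm_bo_add_move_fence() instead of 0 orders the freshly forced allocation behind whatever eviction freed the space for it.
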
@@ -898,6 +912,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
        bool has_erestartsys = false;
        int i, ret;
 
+       ret = reservation_object_reserve_shared(bo->resv);
+       if (unlikely(ret))
+               return ret;
+
        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];
@@ -931,9 +949,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret))
                        return ret;
-               
-               if (mem->mm_node)
+
+               if (mem->mm_node) {
+                       ret = ttm_bo_add_move_fence(bo, man, mem);
+                       if (unlikely(ret)) {
+                               (*man->func->put_node)(man, mem);
+                               return ret;
+                       }
                        break;
+               }
        }
 
        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
@@ -1000,20 +1024,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
        lockdep_assert_held(&bo->resv->lock.base);
 
-       /*
-        * Don't wait for the BO on initial allocation. This is important when
-        * the BO has an imported reservation object.
-        */
-       if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
-               /*
-                * FIXME: It's possible to pipeline buffer moves.
-                * Have the driver move function wait for idle when necessary,
-                * instead of doing it here.
-                */
-               ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-               if (ret)
-                       return ret;
-       }
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
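
With the FIXME resolved, ttm_bo_move_buffer() never blocks up front; a consumer that actually needs the buffer contents waits on the per-BO move fence instead. A hedged sketch of that consumer-side check (the fault-path equivalent lives in ttm_bo_vm.c; the helper name here is illustrative):

    static int ttm_bo_wait_moving(struct ttm_buffer_object *bo, bool intr)
    {
            long ret = 0;

            if (bo->moving && !fence_is_signaled(bo->moving))
                    ret = fence_wait(bo->moving, intr);
            if (ret)
                    return ret;

            /* The move finished; drop the reference eagerly. */
            fence_put(bo->moving);
            bo->moving = NULL;
            return 0;
    }
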
@@ -1166,7 +1176,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;
-       bo->priv_flags = 0;
+       bo->moving = NULL;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->persistent_swap_storage = persistent_swap_storage;
        bo->acc_size = acc_size;
@@ -1278,6 +1288,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
+       struct fence *fence;
        int ret;
 
        /*
@@ -1298,6 +1309,23 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                spin_lock(&glob->lru_lock);
        }
        spin_unlock(&glob->lru_lock);
+
+       spin_lock(&man->move_lock);
+       fence = fence_get(man->move);
+       spin_unlock(&man->move_lock);
+
+       if (fence) {
+               ret = fence_wait(fence, false);
+               fence_put(fence);
+               if (ret) {
+                       if (allow_errors) {
+                               return ret;
+                       } else {
+                               pr_err("Cleanup eviction failed\n");
+                       }
+               }
+       }
+
        return 0;
 }
 
@@ -1317,6 +1345,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
                       mem_type);
                return ret;
        }
+       fence_put(man->move);
 
        man->use_type = false;
        man->has_type = false;
@@ -1362,6 +1391,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        man->io_reserve_fastpath = true;
        man->use_io_reserve_lru = false;
        mutex_init(&man->io_reserve_mutex);
+       spin_lock_init(&man->move_lock);
        INIT_LIST_HEAD(&man->io_reserve_lru);
 
        ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1380,6 +1410,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        man->size = p_size;
 
        INIT_LIST_HEAD(&man->lru);
+       man->move = NULL;
 
        return 0;
 }
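
Both init hunks cover state this patch adds to struct ttm_mem_type_manager in ttm_bo_driver.h; as a sketch, the two new fields:

    struct ttm_mem_type_manager {
            /* ... existing members ... */

            /*
             * Protects @move. Held only briefly; the fence itself is
             * waited on outside the lock after taking a reference.
             */
            spinlock_t move_lock;

            /* Fence of the last pipelined move out of this domain. */
            struct fence *move;
    };
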
@@ -1573,47 +1604,17 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool interruptible, bool no_wait)
 {
-       struct reservation_object_list *fobj;
-       struct reservation_object *resv;
-       struct fence *excl;
-       long timeout = 15 * HZ;
-       int i;
-
-       resv = bo->resv;
-       fobj = reservation_object_get_list(resv);
-       excl = reservation_object_get_excl(resv);
-       if (excl) {
-               if (!fence_is_signaled(excl)) {
-                       if (no_wait)
-                               return -EBUSY;
-
-                       timeout = fence_wait_timeout(excl,
-                                                    interruptible, timeout);
-               }
-       }
-
-       for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
-               struct fence *fence;
-               fence = rcu_dereference_protected(fobj->shared[i],
-                                               reservation_object_held(resv));
-
-               if (!fence_is_signaled(fence)) {
-                       if (no_wait)
-                               return -EBUSY;
-
-                       timeout = fence_wait_timeout(fence,
-                                                    interruptible, timeout);
-               }
-       }
+       long timeout = no_wait ? 0 : 15 * HZ;
 
+       timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+                                                     interruptible, timeout);
        if (timeout < 0)
                return timeout;
 
        if (timeout == 0)
                return -EBUSY;
 
-       reservation_object_add_excl_fence(resv, NULL);
-       clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+       reservation_object_add_excl_fence(bo->resv, NULL);
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
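
The rewritten ttm_bo_wait() delegates to reservation_object_wait_timeout_rcu(), which with its wait_all argument true covers the exclusive fence and every shared fence, and returns the remaining timeout, 0 on timeout, or a negative error. Mapping no_wait to a 0-jiffy timeout therefore yields -EBUSY for free. A trivial usage sketch (helper name illustrative):

    /* Non-blocking idle check: with no_wait set, ttm_bo_wait() polls
     * the fences and reports -EBUSY while anything is still pending.
     */
    static bool ttm_bo_is_idle(struct ttm_buffer_object *bo)
    {
            return ttm_bo_wait(bo, false, true) == 0;
    }
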
@@ -1683,14 +1684,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
        ttm_bo_list_ref_sub(bo, put_count, true);
 
        /**
-        * Wait for GPU, then move to system cached.
+        * Move to system cached
         */
 
-       ret = ttm_bo_wait(bo, false, false);
-
-       if (unlikely(ret != 0))
-               goto out;
-
        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;
 
@@ -1705,6 +1701,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                        goto out;
        }
 
+       /**
+        * Make sure BO is idle.
+        */
+
+       ret = ttm_bo_wait(bo, false, false);
+       if (unlikely(ret != 0))
+               goto out;
+
        ttm_bo_unmap_virtual(bo);
 
        /**