drm/radeon: fence PT updates as shared
author     Christian König <christian.koenig@amd.com>
           Thu, 27 Nov 2014 13:48:43 +0000 (14:48 +0100)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 3 Dec 2014 19:26:48 +0000 (14:26 -0500)
The BO_VA contains everything necessary, so the page table updates only need
to be added as shared fences: reserve a shared slot on each page table's
reservation object before emitting the update and fence the page tables with
shared=true afterwards instead of taking the exclusive slot.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
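For orientation, the per-page-table sequence this patch establishes can be
sketched as follows (illustration only, not part of the patch; the helper name
example_update_one_pt is made up, while the calls and fields are the ones used
in the diff below):

    /* Sketch: sync, reserve a shared slot, emit, then fence as shared. */
    static int example_update_one_pt(struct radeon_device *rdev,
                                     struct radeon_ib *ib,
                                     struct radeon_bo *pt)
    {
            int r;

            /* sync the IB to whatever is already fenced on the page table */
            radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);

            /* reserve a shared fence slot up front so the update can fail
             * cleanly before any commands are emitted */
            r = reservation_object_reserve_shared(pt->tbo.resv);
            if (r)
                    return r;

            /* ... emit the PTE writes into the IB and schedule it ... */

            /* attach the resulting fence as shared rather than exclusive */
            radeon_bo_fence(pt, ib->fence, true);
            return 0;
    }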
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 0423e29..6bc3821 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -798,11 +798,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
  *
  * Global and local mutex must be locked!
  */
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
-                                 struct radeon_vm *vm,
-                                 struct radeon_ib *ib,
-                                 uint64_t start, uint64_t end,
-                                 uint64_t dst, uint32_t flags)
+static int radeon_vm_update_ptes(struct radeon_device *rdev,
+                                struct radeon_vm *vm,
+                                struct radeon_ib *ib,
+                                uint64_t start, uint64_t end,
+                                uint64_t dst, uint32_t flags)
 {
        uint64_t mask = RADEON_VM_PTE_COUNT - 1;
        uint64_t last_pte = ~0, last_dst = ~0;
@@ -815,8 +815,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
                unsigned nptes;
                uint64_t pte;
+               int r;
 
                radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
+               r = reservation_object_reserve_shared(pt->tbo.resv);
+               if (r)
+                       return r;
 
                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
@@ -850,6 +854,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                                    last_pte + 8 * count,
                                    last_dst, flags);
        }
+
+       return 0;
 }
 
 /**
@@ -874,7 +880,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
        end >>= radeon_vm_block_size;
 
        for (i = start; i <= end; ++i)
-               radeon_bo_fence(vm->page_tables[i].bo, fence, false);
+               radeon_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
 /**
@@ -983,9 +989,13 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
                        radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
        }
 
-       radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
-                             bo_va->it.last + 1, addr,
-                             radeon_vm_page_flags(bo_va->flags));
+       r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+                                 bo_va->it.last + 1, addr,
+                                 radeon_vm_page_flags(bo_va->flags));
+       if (r) {
+               radeon_ib_free(rdev, &ib);
+               return r;
+       }
 
        radeon_asic_vm_pad_ib(rdev, &ib);
        WARN_ON(ib.length_dw > ndw);
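
For reference, radeon_bo_fence() just forwards the shared flag to the
reservation object, roughly like this (paraphrased from the radeon_object.c of
that time, shown only to clarify what shared=true means here):

    void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
                         bool shared)
    {
            struct reservation_object *resv = bo->tbo.resv;

            if (shared)
                    /* record alongside the other shared fences */
                    reservation_object_add_shared_fence(resv, &fence->base);
            else
                    /* install as the single exclusive fence */
                    reservation_object_add_excl_fence(resv, &fence->base);
    }

Passing true therefore records the PT update as one more shared fence instead
of installing it as the exclusive fence.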