drm/gem: Warn on illegal use of the dumb buffer interface v2
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 480c87d..76eedd6 100644
@@ -75,6 +75,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        bo = container_of(tbo, struct radeon_bo, tbo);
 
        radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+       radeon_mn_unregister(bo);
 
        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
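A note on the ordering here: radeon_mn_unregister() runs first in the destructor, before the BO is unlinked and its storage released, so a late MMU-notifier invalidation can never race against a half-destroyed object. A minimal standalone C model of that teardown contract (all names are illustrative, not the driver's):

    #include <stdlib.h>

    struct obj { void (*invalidate_cb)(struct obj *); };

    static void obj_destroy(struct obj *o)
    {
            o->invalidate_cb = NULL; /* ~ radeon_mn_unregister(): no more callbacks */
            /* ... unlink from driver lists (under the gem mutex) ... */
            free(o);                 /* safe only once callbacks are gone */
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));
            obj_destroy(o);
            return 0;
    }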
@@ -96,40 +97,83 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 {
        u32 c = 0, i;
 
-       rbo->placement.fpfn = 0;
-       rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
-       if (domain & RADEON_GEM_DOMAIN_VRAM)
-               rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-                                       TTM_PL_FLAG_VRAM;
+       if (domain & RADEON_GEM_DOMAIN_VRAM) {
+               /* Try placing BOs which don't need CPU access outside of the
+                * CPU accessible part of VRAM
+                */
+               if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+                   rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
+                       rbo->placements[c].fpfn =
+                               rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+                       rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+                                                    TTM_PL_FLAG_UNCACHED |
+                                                    TTM_PL_FLAG_VRAM;
+               }
+
+               rbo->placements[c].fpfn = 0;
+               rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+                                            TTM_PL_FLAG_UNCACHED |
+                                            TTM_PL_FLAG_VRAM;
+       }
+
        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
-                       rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+                       rbo->placements[c].fpfn = 0;
+                       rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+                               TTM_PL_FLAG_TT;
+
                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
-                       rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+                       rbo->placements[c].fpfn = 0;
+                       rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+                               TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;
                } else {
-                       rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+                       rbo->placements[c].fpfn = 0;
+                       rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+                                                    TTM_PL_FLAG_TT;
                }
        }
+
        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
-                       rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+                       rbo->placements[c].fpfn = 0;
+                       rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+                               TTM_PL_FLAG_SYSTEM;
+
                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                    rbo->rdev->flags & RADEON_IS_AGP) {
-                       rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+                       rbo->placements[c].fpfn = 0;
+                       rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+                               TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;
                } else {
-                       rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+                       rbo->placements[c].fpfn = 0;
+                       rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+                                                    TTM_PL_FLAG_SYSTEM;
                }
        }
-       if (!c)
-               rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       if (!c) {
+               rbo->placements[c].fpfn = 0;
+               rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
+                                            TTM_PL_FLAG_SYSTEM;
+       }
+
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;
 
+       for (i = 0; i < c; ++i) {
+               if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
+                   (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+                   !rbo->placements[i].fpfn)
+                       rbo->placements[i].lpfn =
+                               rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+               else
+                       rbo->placements[i].lpfn = 0;
+       }
+
        /*
         * Use two-ended allocation depending on the buffer size to
         * improve fragmentation quality.
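This hunk is the mechanical fallout of TTM's switch from flat u32 placement flags to per-entry struct ttm_place, where fpfn/lpfn bound each placement in page-frame numbers. The interesting part is the VRAM case: a RADEON_GEM_NO_CPU_ACCESS BO first tries a window starting past the CPU-visible aperture, with plain VRAM as fallback, while the trailing loop caps CPU-accessible VRAM placements at the aperture. A standalone sketch of how the two-entry VRAM list is built (the struct below only mimics ttm_place, and the sizes are made-up demo values):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define FLAG_VRAM  (1u << 0)

    struct place { uint32_t fpfn, lpfn, flags; };

    int main(void)
    {
            uint64_t visible = 256ull << 20;  /* CPU-visible aperture */
            uint64_t real    = 1024ull << 20; /* total VRAM */
            int no_cpu_access = 1;            /* models RADEON_GEM_NO_CPU_ACCESS */
            struct place p[2];
            unsigned c = 0;

            if (no_cpu_access && visible < real) {
                    /* preferred entry: start past the visible aperture */
                    p[c].fpfn = visible >> PAGE_SHIFT;
                    p[c].lpfn = 0;            /* 0 == no upper bound */
                    p[c++].flags = FLAG_VRAM;
            }
            /* fallback entry: anywhere in VRAM */
            p[c].fpfn = 0;
            p[c].lpfn = 0;
            p[c++].flags = FLAG_VRAM;

            for (unsigned i = 0; i < c; i++)
                    printf("place %u: fpfn=0x%x lpfn=0x%x\n",
                           i, p[i].fpfn, p[i].lpfn);
            return 0;
    }

TTM walks the entries in order, so invisible VRAM is preferred and visible VRAM stays available as a fallback.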
@@ -137,14 +181,16 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
         */
        if (rbo->tbo.mem.size > 512 * 1024) {
                for (i = 0; i < c; i++) {
-                       rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
+                       rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
                }
        }
 }
 
 int radeon_bo_create(struct radeon_device *rdev,
-                    unsigned long size, int byte_align, bool kernel, u32 domain,
-                    u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
+                    unsigned long size, int byte_align, bool kernel,
+                    u32 domain, u32 flags, struct sg_table *sg,
+                    struct reservation_object *resv,
+                    struct radeon_bo **bo_ptr)
 {
        struct radeon_bo *bo;
        enum ttm_bo_type type;
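radeon_bo_create() grows a struct reservation_object *resv parameter, threaded through to ttm_bo_init() in the next hunk. Callers that don't share fencing state with another object just pass NULL and keep a private, per-BO reservation. A sketch of what an updated call site might look like (size/domain values are placeholders, not taken from this patch):

    struct radeon_bo *bo;
    int r;

    r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
                         RADEON_GEM_DOMAIN_GTT, 0, NULL /* sg */,
                         NULL /* resv */, &bo);
    if (r)
            return r;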
@@ -192,7 +238,7 @@ int radeon_bo_create(struct radeon_device *rdev,
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
-                       acc_size, sg, &radeon_ttm_bo_destroy);
+                       acc_size, sg, resv, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                return r;
@@ -264,6 +310,9 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 {
        int r, i;
 
+       if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+               return -EPERM;
+
        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
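Userptr BOs are backed by pages borrowed from a process's address space, which can be unmapped or migrated at any time, so a long-term pin can never be honoured; the new check fails fast with -EPERM before any placement state is touched. A standalone model of the guard (struct and names are illustrative):

    #include <errno.h>
    #include <stdio.h>

    struct bo { int has_userptr; int pin_count; };

    static int bo_pin(struct bo *bo)
    {
            if (bo->has_userptr)
                    return -EPERM;  /* user pages may vanish; never pin them */
            bo->pin_count++;
            return 0;
    }

    int main(void)
    {
            struct bo b = { .has_userptr = 1 };
            printf("pin -> %d\n", bo_pin(&b));  /* -1, i.e. -EPERM */
            return 0;
    }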
@@ -283,21 +332,19 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
-       if (domain == RADEON_GEM_DOMAIN_VRAM) {
+       for (i = 0; i < bo->placement.num_placement; i++) {
                /* force to pin into visible video ram */
-               bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
-       }
-       if (max_offset) {
-               u64 lpfn = max_offset >> PAGE_SHIFT;
-
-               if (!bo->placement.lpfn)
-                       bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+               if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+                   !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+                   (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
+                       bo->placements[i].lpfn =
+                               bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+               else
+                       bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
 
-               if (lpfn < bo->placement.lpfn)
-                       bo->placement.lpfn = lpfn;
+               bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
-       for (i = 0; i < bo->placement.num_placement; i++)
-               bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
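The rewritten pin path computes an upper bound per placement instead of once per BO: CPU-accessible VRAM placements are clamped to the visible aperture unless the caller's max_offset is already tighter, and everything else just takes max_offset (0 meaning unbounded). A worked standalone model of that decision (sizes are example values):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static uint32_t pin_lpfn(int is_vram, int no_cpu_access,
                             uint64_t max_offset, uint64_t visible_vram)
    {
            if (is_vram && !no_cpu_access &&
                (!max_offset || max_offset > visible_vram))
                    return visible_vram >> PAGE_SHIFT; /* clamp to aperture */
            return max_offset >> PAGE_SHIFT;           /* caller's limit */
    }

    int main(void)
    {
            uint64_t visible = 256ull << 20;

            /* no explicit limit: clamped to the aperture (0x10000) */
            printf("lpfn=0x%x\n", pin_lpfn(1, 0, 0, visible));
            /* caller limit of 128 MiB is tighter and wins (0x8000) */
            printf("lpfn=0x%x\n", pin_lpfn(1, 0, 128ull << 20, visible));
            /* NO_CPU_ACCESS BO: only the caller's limit applies (0) */
            printf("lpfn=0x%x\n", pin_lpfn(1, 1, 0, visible));
            return 0;
    }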
@@ -329,8 +376,10 @@ int radeon_bo_unpin(struct radeon_bo *bo)
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
-       for (i = 0; i < bo->placement.num_placement; i++)
-               bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+       for (i = 0; i < bo->placement.num_placement; i++) {
+               bo->placements[i].lpfn = 0;
+               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+       }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
@@ -459,7 +508,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
-       r = ttm_eu_reserve_buffers(ticket, head);
+       r = ttm_eu_reserve_buffers(ticket, head, true);
        if (unlikely(r != 0)) {
                return r;
        }
@@ -468,9 +517,13 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->prefered_domains;
+                       u32 allowed = lobj->allowed_domains;
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
+                       WARN_ONCE(bo->gem_base.dumb,
+                                 "GPU use of dumb buffer is illegal.\n");
+
                        /* Check if this buffer will be moved and don't move it
                         * if we have moved too many buffers for this IB already.
                         *
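This WARN_ONCE is the radeon half of the commit subject: dumb buffers exist for unaccelerated KMS scanout, so submitting one through the CS ioctl is flagged. The gem_base.dumb flag is presumably set on the dumb-create path in the GEM core side of the series; that part is not in this file. WARN_ONCE itself checks on every validate but emits the splat only once per boot; a simplified standalone model of those semantics (the real macro also dumps a backtrace and taints the kernel; this uses a GNU C statement expression, as the kernel does):

    #include <stdbool.h>
    #include <stdio.h>

    #define WARN_ONCE(cond, fmt)                    \
            ({                                      \
                    static bool __warned;           \
                    bool __c = (cond);              \
                    if (__c && !__warned) {         \
                            __warned = true;        \
                            fprintf(stderr, fmt);   \
                    }                               \
                    __c;                            \
            })

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    WARN_ONCE(true, "GPU use of dumb buffer is illegal.\n");
            /* the message is printed once, not three times */
            return 0;
    }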
@@ -479,7 +532,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
-                       if ((lobj->allowed_domains & current_domain) != 0 &&
+                       if ((allowed & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
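With allowed hoisted into a local, the throttle condition reads: if the buffer may legally stay in its current domain, a move to the preferred domain is due, and this submission has already spent its migration budget, leave it in place. A standalone model of the decision (domain bits and the threshold are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    #define DOM_VRAM (1u << 0)
    #define DOM_GTT  (1u << 1)

    static int keep_in_place(uint32_t allowed, uint32_t preferred,
                             uint32_t current, uint64_t bytes_moved,
                             uint64_t threshold)
    {
            return (allowed & current) != 0 &&   /* staying put is legal */
                   (preferred & current) == 0 && /* ...but a move is due */
                   bytes_moved > threshold;      /* ...and budget is spent */
    }

    int main(void)
    {
            /* BO in GTT, prefers VRAM, GTT still allowed, budget blown:
             * skip the move for this submission -> prints 1 */
            printf("%d\n", keep_in_place(DOM_VRAM | DOM_GTT, DOM_VRAM,
                                         DOM_GTT, 8 << 20, 4 << 20));
            return 0;
    }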
@@ -489,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
-                               radeon_uvd_force_into_uvd_segment(bo);
+                               radeon_uvd_force_into_uvd_segment(bo, allowed);
 
                        initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
@@ -713,8 +766,8 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
-       unsigned long offset, size;
-       int r;
+       unsigned long offset, size, lpfn;
+       int i, r;
 
        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
@@ -731,7 +784,13 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
        /* hurrah the memory is not visible ! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-       rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+       for (i = 0; i < rbo->placement.num_placement; i++) {
+               /* Force into visible VRAM */
+               if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+                   (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
+                       rbo->placements[i].lpfn = lpfn;
+       }
        r = ttm_bo_validate(bo, &rbo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
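On a CPU fault against storage outside the visible aperture, every VRAM placement whose window is unbounded or reaches past the aperture is capped at it before revalidation; if visible VRAM is simply full, the -ENOMEM fallback above retries in GTT. A compact model of the clamp (simplified to VRAM-only entries; values are examples):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct place { uint32_t fpfn, lpfn; };

    int main(void)
    {
            uint32_t lpfn = (uint32_t)((256ull << 20) >> PAGE_SHIFT);
            struct place p[2] = { { 0, 0 }, { 0, 0x20000 } };

            for (int i = 0; i < 2; i++)
                    if (!p[i].lpfn || p[i].lpfn > lpfn)
                            p[i].lpfn = lpfn;
            /* both windows end up capped at 0x10000 */
            printf("lpfn[0]=0x%x lpfn[1]=0x%x\n", p[0].lpfn, p[1].lpfn);
            return 0;
    }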
@@ -755,12 +814,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
        if (unlikely(r != 0))
                return r;
-       spin_lock(&bo->tbo.bdev->fence_lock);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
-       if (bo->tbo.sync_obj)
-               r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-       spin_unlock(&bo->tbo.bdev->fence_lock);
+
+       r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        ttm_bo_unreserve(&bo->tbo);
        return r;
 }
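With fences now tracked in the BO's reservation object, the device-global fence_lock is gone: the reservation taken by ttm_bo_reserve() a few lines up is what protects the fence state, so ttm_bo_wait() can be called unconditionally under it. A toy pthread model of the locking shape (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct bo { pthread_mutex_t resv; int fence_signaled; };

    static void bo_wait(struct bo *bo)
    {
            /* stand-in for ttm_bo_wait(): runs under the per-BO
             * reservation the caller holds; no global lock needed */
            bo->fence_signaled = 1;
    }

    int main(void)
    {
            struct bo bo = { PTHREAD_MUTEX_INITIALIZER, 0 };

            pthread_mutex_lock(&bo.resv);   /* ~ ttm_bo_reserve() */
            bo_wait(&bo);
            pthread_mutex_unlock(&bo.resv); /* ~ ttm_bo_unreserve() */
            printf("waited under the per-BO reservation only\n");
            return 0;
    }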