diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b990b1a..96e4400 100644
@@ -29,6 +29,7 @@
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 #include "radeon_reg.h"
+#include "radeon_trace.h"
 
 /*
  * GART
@@ -607,8 +608,8 @@ static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
  */
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-       unsigned pd_size, pts_size;
-       u64 *pd_addr;
+       unsigned pd_size, pd_entries, pts_size;
+       struct radeon_ib ib;
        int r;
 
        if (vm == NULL) {
@@ -619,8 +620,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
                return 0;
        }
 
-retry:
        pd_size = radeon_vm_directory_size(rdev);
+       pd_entries = radeon_vm_num_pdes(rdev);
+
+retry:
        r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
                             &vm->page_directory, pd_size,
                             RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -637,9 +640,31 @@ retry:
        vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
 
        /* Initially clear the page directory */
-       pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
-       memset(pd_addr, 0, pd_size);
+       r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
+                         NULL, pd_entries * 2 + 64);
+       if (r) {
+               radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+               return r;
+       }
+
+       ib.length_dw = 0;
+
+       radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
+                               0, pd_entries, 0, 0);
+
+       radeon_semaphore_sync_to(ib.semaphore, vm->fence);
+       r = radeon_ib_schedule(rdev, &ib, NULL);
+       if (r) {
+               radeon_ib_free(rdev, &ib);
+               radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+               return r;
+       }
+       radeon_fence_unref(&vm->fence);
+       vm->fence = radeon_fence_ref(ib.fence);
+       radeon_ib_free(rdev, &ib);
+       radeon_fence_unref(&vm->last_flush);
 
+       /* allocate page table array */
        pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
        vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
 
@@ -713,6 +738,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
        for (i = 0; i < 2; ++i) {
                if (choices[i]) {
                        vm->id = choices[i];
+                       trace_radeon_vm_grab_id(vm->id, ring);
                        return rdev->vm_manager.active[choices[i]];
                }
        }
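
The trace_radeon_vm_grab_id() call is what the new radeon_trace.h include at the top of the file is for. The tracepoint definition itself is not part of this diff; a plausible sketch of the TRACE_EVENT() declaration it implies, following the usual tracepoint pattern, with the field names assumed:

TRACE_EVENT(radeon_vm_grab_id,
	    TP_PROTO(unsigned vmid, int ring),
	    TP_ARGS(vmid, ring),
	    TP_STRUCT__entry(
			     __field(u32, vmid)
			     __field(u32, ring)
			     ),
	    TP_fast_assign(
			   __entry->vmid = vmid;
			   __entry->ring = ring;
			   ),
	    TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
);
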
@@ -913,6 +939,26 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
        return result;
 }
 
+/**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags coming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+       uint32_t hw_flags = 0;
+       hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+       hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+       hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+       if (flags & RADEON_VM_PAGE_SYSTEM) {
+               hw_flags |= R600_PTE_SYSTEM;
+               hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+       }
+       return hw_flags;
+}
+
 /**
  * radeon_vm_update_pdes - make sure that page directory is valid
  *
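
The new radeon_vm_page_flags() helper decouples the userspace ABI bits (RADEON_VM_PAGE_*) from the hardware PTE bits (R600_PTE_*), which is what lets the hunks below pass R600_PTE_VALID instead of the ABI flag. A standalone check of the mapping; the bit values are copied from radeon_drm.h and the R600 PTE definitions as best remembered, so verify them against the real headers:

#include <assert.h>
#include <stdint.h>

#define RADEON_VM_PAGE_VALID		(1 << 0)
#define RADEON_VM_PAGE_READABLE		(1 << 1)
#define RADEON_VM_PAGE_WRITEABLE	(1 << 2)
#define RADEON_VM_PAGE_SYSTEM		(1 << 3)
#define RADEON_VM_PAGE_SNOOPED		(1 << 4)

#define R600_PTE_VALID		(1 << 0)
#define R600_PTE_SYSTEM		(1 << 1)
#define R600_PTE_SNOOPED	(1 << 2)
#define R600_PTE_READABLE	(1 << 5)
#define R600_PTE_WRITEABLE	(1 << 6)

/* body identical to the function in the hunk above */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;
	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

int main(void)
{
	/* VRAM mapping: SNOOPED without SYSTEM is dropped */
	assert(radeon_vm_page_flags(RADEON_VM_PAGE_VALID |
				    RADEON_VM_PAGE_SNOOPED) == R600_PTE_VALID);
	/* snooping only takes effect for system pages */
	assert(radeon_vm_page_flags(RADEON_VM_PAGE_SYSTEM |
				    RADEON_VM_PAGE_SNOOPED) ==
	       (R600_PTE_SYSTEM | R600_PTE_SNOOPED));
	return 0;
}
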
@@ -974,7 +1020,11 @@ retry:
                        if (count) {
                                radeon_asic_vm_set_page(rdev, ib, last_pde,
                                                        last_pt, count, incr,
-                                                       RADEON_VM_PAGE_VALID);
+                                                       R600_PTE_VALID);
+
+                               count *= RADEON_VM_PTE_COUNT;
+                               radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+                                                       count, 0, 0);
                        }
 
                        count = 1;
@@ -987,8 +1037,11 @@ retry:
 
        if (count) {
                radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
-                                       incr, RADEON_VM_PAGE_VALID);
+                                       incr, R600_PTE_VALID);
 
+               count *= RADEON_VM_PTE_COUNT;
+               radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+                                       count, 0, 0);
        }
 
        return 0;
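
Both hunks above pair each run of newly written PDEs with a second radeon_asic_vm_set_page() call that zero-fills the page tables those PDEs point at (count * RADEON_VM_PTE_COUNT entries starting at last_pt, with addr, incr and flags all 0), so the hardware never walks a freshly allocated but uninitialized table. In terms of vm_set_page_model() from the earlier note; pte_count stands in for RADEON_VM_PTE_COUNT, and the 8-byte entry size plus the stand-in pd/pt views are assumptions:

/* Sketch, not driver code: one flush of `count` contiguous new
 * page tables.  `pd` and `pt` are hypothetical CPU views of the
 * page directory and of the first new table.
 */
static void flush_new_pts_model(uint64_t *pd, unsigned first_pde,
				uint64_t *pt, uint64_t first_pt_addr,
				unsigned count, unsigned pte_count)
{
	/* hook the new tables into the directory, one PDE each */
	vm_set_page_model(pd + first_pde, first_pt_addr, count,
			  pte_count * 8, R600_PTE_VALID);
	/* zero every PTE inside them before the hw can walk them */
	vm_set_page_model(pt, 0, count * pte_count, 0, 0);
}
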
@@ -1065,7 +1118,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
 }
 
 /**
- * radeon_vm_bo_update_pte - map a bo into the vm page table
+ * radeon_vm_bo_update - map a bo into the vm page table
  *
  * @rdev: radeon_device pointer
  * @vm: requested vm
@@ -1077,12 +1130,11 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
  *
  * Objects have to be reserved and the global and local mutex must be locked!
  */
-int radeon_vm_bo_update_pte(struct radeon_device *rdev,
-                           struct radeon_vm *vm,
-                           struct radeon_bo *bo,
-                           struct ttm_mem_reg *mem)
+int radeon_vm_bo_update(struct radeon_device *rdev,
+                       struct radeon_vm *vm,
+                       struct radeon_bo *bo,
+                       struct ttm_mem_reg *mem)
 {
-       unsigned ridx = rdev->asic->vm.pt_ring_index;
        struct radeon_ib ib;
        struct radeon_bo_va *bo_va;
        unsigned nptes, npdes, ndw;
@@ -1126,6 +1178,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
                bo_va->valid = false;
        }
 
+       trace_radeon_vm_bo_update(bo_va);
+
        nptes = radeon_bo_ngpu_pages(bo);
 
        /* assume two extra pdes in case the mapping overlaps the borders */
@@ -1151,11 +1205,16 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
        /* reserve space for pde addresses */
        ndw += npdes * 2;
 
+       /* reserve space for clearing new page tables */
+       ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
+
        /* update too big for an IB */
        if (ndw > 0xfffff)
                return -ENOMEM;
 
-       r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+       r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+       if (r)
+               return r;
        ib.length_dw = 0;
 
        r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
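
Three fixes land in the hunk above: the dword budget grows by npdes * 2 * RADEON_VM_PTE_COUNT to cover the zero-fill of any page tables radeon_vm_update_pdes() may allocate, page-table updates are now issued on the DMA ring unconditionally (R600_RING_TYPE_DMA_INDEX) rather than through pt_ring_index, and the previously ignored return value of radeon_ib_get() is finally checked. The budget, mirrored as a model with only the terms visible in this diff; the 64-dword header/padding base and the nptes term are assumptions from the surrounding (unshown) code, and pte_count again stands in for RADEON_VM_PTE_COUNT:

static unsigned vm_update_ndw_model(unsigned nptes, unsigned npdes,
				    unsigned pte_count)
{
	unsigned ndw = 64;		/* assumed header/padding base */

	ndw += nptes * 2;		/* PTE writes, 2 dwords each */
	ndw += npdes * 2;		/* PDE writes, 2 dwords each */
	ndw += npdes * 2 * pte_count;	/* zero-fill for new tables */
	return ndw;
}
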
@@ -1165,9 +1224,9 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
        }
 
        radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
-                             addr, bo_va->flags);
+                             addr, radeon_vm_page_flags(bo_va->flags));
 
-       radeon_ib_sync_to(&ib, vm->fence);
+       radeon_semaphore_sync_to(ib.semaphore, vm->fence);
        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r) {
                radeon_ib_free(rdev, &ib);
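
radeon_ib_sync_to() is gone; callers now invoke radeon_semaphore_sync_to() on the IB's embedded semaphore directly, and the same call appears in the new radeon_vm_alloc_pt() code above. The retired helper was presumably just a thin wrapper, along these hypothetical lines:

static inline void radeon_ib_sync_to(struct radeon_ib *ib,
				     struct radeon_fence *fence)
{
	radeon_semaphore_sync_to(ib->semaphore, fence);
}
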
@@ -1202,7 +1261,7 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
        mutex_lock(&rdev->vm_manager.lock);
        mutex_lock(&bo_va->vm->mutex);
        if (bo_va->soffset) {
-               r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+               r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
        }
        mutex_unlock(&rdev->vm_manager.lock);
        list_del(&bo_va->vm_list);