drm/i915: fix reference counting in i915_gem_create
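
The i915_gem_create() hunk below replaces the open-coded teardown on the
drm_gem_handle_create() error path (drm_gem_object_release(),
i915_gem_info_remove_obj(), i915_gem_object_free()) with a single
unconditional unreference: once drm_gem_handle_create() has run, the handle
either holds its own reference or the call failed, so dropping the
allocation reference covers both cases. The create tracepoint moves into
i915_gem_alloc_object(). A minimal sketch of the resulting flow (argument
checking elided, as in the hunk below):

	obj = i915_gem_alloc_object(dev, size);	/* holds the allocation ref */
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* Drop the allocation reference: on success the handle now owns the
	 * object; on failure this unreference frees it, so no manual
	 * release/free is needed on the error path.
	 */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
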
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 970ad17..0a1ddb8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-       return obj->gtt_space && !obj->active;
+       return i915_gem_obj_ggtt_bound(obj) && !obj->active;
 }
 
 int
@@ -176,12 +176,12 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
        pinned = 0;
        mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (obj->pin_count)
-                       pinned += obj->gtt_space->size;
+                       pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);
 
-       args->aper_size = dev_priv->gtt.total;
+       args->aper_size = dev_priv->gtt.base.total;
        args->aper_available_size = args->aper_size - pinned;
 
        return 0;
@@ -219,16 +219,10 @@ i915_gem_create(struct drm_file *file,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file, &obj->base, &handle);
-       if (ret) {
-               drm_gem_object_release(&obj->base);
-               i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
-               i915_gem_object_free(obj);
-               return ret;
-       }
-
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference(&obj->base);
-       trace_i915_gem_object_create(obj);
+       drm_gem_object_unreference_unlocked(&obj->base);
+       if (ret)
+               return ret;
 
        *handle_p = handle;
        return 0;
@@ -422,7 +416,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
                 * anyway again before the next pread happens. */
                if (obj->cache_level == I915_CACHE_NONE)
                        needs_clflush = 1;
-               if (obj->gtt_space) {
+               if (i915_gem_obj_ggtt_bound(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, false);
                        if (ret)
                                return ret;
@@ -465,7 +459,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
                mutex_unlock(&dev->struct_mutex);
 
-               if (!prefaulted) {
+               if (likely(!i915_prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
@@ -609,7 +603,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
 
-       offset = obj->gtt_offset + args->offset;
+       offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
        while (remain > 0) {
                /* Operation in this page
@@ -739,7 +733,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                 * right away and we therefore have to clflush anyway. */
                if (obj->cache_level == I915_CACHE_NONE)
                        needs_clflush_after = 1;
-               if (obj->gtt_space) {
+               if (i915_gem_obj_ggtt_bound(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, true);
                        if (ret)
                                return ret;
@@ -860,10 +854,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                       args->size))
                return -EFAULT;
 
-       ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
-                                          args->size);
-       if (ret)
-               return -EFAULT;
+       if (likely(!i915_prefault_disable)) {
+               ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+                                                  args->size);
+               if (ret)
+                       return -EFAULT;
+       }
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
@@ -956,7 +952,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 
        ret = 0;
        if (seqno == ring->outstanding_lazy_request)
-               ret = i915_add_request(ring, NULL, NULL);
+               ret = i915_add_request(ring, NULL);
 
        return ret;
 }
@@ -1087,6 +1083,25 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
                            interruptible, NULL);
 }
 
+static int
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
+                                    struct intel_ring_buffer *ring)
+{
+       i915_gem_retire_requests_ring(ring);
+
+       /* Manually manage the write flush as we may have not yet
+        * retired the buffer.
+        *
+        * Note that the last_write_seqno is always the earlier of
+        * the two (read/write) seqno, so if we have successfully waited,
+        * we know we have passed the last write.
+        */
+       obj->last_write_seqno = 0;
+       obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+
+       return 0;
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1107,18 +1122,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       i915_gem_retire_requests_ring(ring);
-
-       /* Manually manage the write flush as we may have not yet
-        * retired the buffer.
-        */
-       if (obj->last_write_seqno &&
-           i915_seqno_passed(seqno, obj->last_write_seqno)) {
-               obj->last_write_seqno = 0;
-               obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-       }
-
-       return 0;
+       return i915_gem_object_wait_rendering__tail(obj, ring);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1154,19 +1158,10 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        mutex_unlock(&dev->struct_mutex);
        ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
        mutex_lock(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
-       i915_gem_retire_requests_ring(ring);
-
-       /* Manually manage the write flush as we may have not yet
-        * retired the buffer.
-        */
-       if (obj->last_write_seqno &&
-           i915_seqno_passed(seqno, obj->last_write_seqno)) {
-               obj->last_write_seqno = 0;
-               obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-       }
-
-       return ret;
+       return i915_gem_object_wait_rendering__tail(obj, ring);
 }
 
 /**
@@ -1361,8 +1356,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        obj->fault_mappable = true;
 
-       pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
-               page_offset;
+       pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+       pfn >>= PAGE_SHIFT;
+       pfn += page_offset;
 
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1668,7 +1664,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (obj->pages == NULL)
                return 0;
 
-       BUG_ON(obj->gtt_space);
+       BUG_ON(i915_gem_obj_ggtt_bound(obj));
 
        if (obj->pages_pin_count)
                return -EBUSY;
@@ -1676,7 +1672,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early. */
-       list_del(&obj->gtt_list);
+       list_del(&obj->global_list);
 
        ops->put_pages(obj);
        obj->pages = NULL;
@@ -1692,11 +1688,12 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
                  bool purgeable_only)
 {
        struct drm_i915_gem_object *obj, *next;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        long count = 0;
 
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.unbound_list,
-                                gtt_list) {
+                                global_list) {
                if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
@@ -1705,9 +1702,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
                }
        }
 
-       list_for_each_entry_safe(obj, next,
-                                &dev_priv->mm.inactive_list,
-                                mm_list) {
+       list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
                if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_unbind(obj) == 0 &&
                    i915_gem_object_put_pages(obj) == 0) {
@@ -1733,7 +1728,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 
        i915_gem_evict_everything(dev_priv->dev);
 
-       list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+       list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+                                global_list)
                i915_gem_object_put_pages(obj);
 }
 
@@ -1801,7 +1797,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                        gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
                        gfp &= ~(__GFP_IO | __GFP_WAIT);
                }
-
+#ifdef CONFIG_SWIOTLB
+               if (swiotlb_nr_tbl()) {
+                       st->nents++;
+                       sg_set_page(sg, page, PAGE_SIZE, 0);
+                       sg = sg_next(sg);
+                       continue;
+               }
+#endif
                if (!i || page_to_pfn(page) != last_pfn + 1) {
                        if (i)
                                sg = sg_next(sg);
@@ -1812,8 +1815,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                }
                last_pfn = page_to_pfn(page);
        }
-
-       sg_mark_end(sg);
+#ifdef CONFIG_SWIOTLB
+       if (!swiotlb_nr_tbl())
+#endif
+               sg_mark_end(sg);
        obj->pages = st;
 
        if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1858,7 +1863,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        if (ret)
                return ret;
 
-       list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+       list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
        return 0;
 }
 
@@ -1868,6 +1873,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        u32 seqno = intel_ring_get_seqno(ring);
 
        BUG_ON(ring == NULL);
@@ -1880,7 +1886,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        }
 
        /* Move from whatever list we were on to the tail of execution. */
-       list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+       list_move_tail(&obj->mm_list, &vm->active_list);
        list_move_tail(&obj->ring_list, &ring->active_list);
 
        obj->last_read_seqno = seqno;
@@ -1904,11 +1910,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
 
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
-       list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       list_move_tail(&obj->mm_list, &vm->inactive_list);
 
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
@@ -1996,17 +2003,18 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
        return 0;
 }
 
-int
-i915_add_request(struct intel_ring_buffer *ring,
-                struct drm_file *file,
-                u32 *out_seqno)
+int __i915_add_request(struct intel_ring_buffer *ring,
+                      struct drm_file *file,
+                      struct drm_i915_gem_object *obj,
+                      u32 *out_seqno)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
-       u32 request_ring_position;
+       u32 request_ring_position, request_start;
        int was_empty;
        int ret;
 
+       request_start = intel_ring_get_tail(ring);
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
@@ -2038,7 +2046,21 @@ i915_add_request(struct intel_ring_buffer *ring,
 
        request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
+       request->head = request_start;
        request->tail = request_ring_position;
+       request->ctx = ring->last_context;
+       request->batch_obj = obj;
+
+       /* Whilst this request exists, batch_obj will be on the
+        * active_list, and so will hold the active reference. Only when this
+        * request is retired will the batch_obj be moved onto the
+        * inactive_list and lose its active reference. Hence we do not need
+        * to explicitly hold another reference here.
+        */
+
+       if (request->ctx)
+               i915_gem_context_reference(request->ctx);
+
        request->emitted_jiffies = jiffies;
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
@@ -2057,11 +2079,9 @@ i915_add_request(struct intel_ring_buffer *ring,
        trace_i915_gem_request_add(ring, request->seqno);
        ring->outstanding_lazy_request = 0;
 
-       if (!dev_priv->mm.suspended) {
-               if (i915_enable_hangcheck) {
-                       mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-                                 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
-               }
+       if (!dev_priv->ums.mm_suspended) {
+               i915_queue_hangcheck(ring->dev);
+
                if (was_empty) {
                        queue_delayed_work(dev_priv->wq,
                                           &dev_priv->mm.retire_work,
@@ -2091,9 +2111,114 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+{
+       if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+           acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+               return true;
+
+       return false;
+}
+
+static bool i915_head_inside_request(const u32 acthd_unmasked,
+                                    const u32 request_start,
+                                    const u32 request_end)
+{
+       const u32 acthd = acthd_unmasked & HEAD_ADDR;
+
+       if (request_start < request_end) {
+               if (acthd >= request_start && acthd < request_end)
+                       return true;
+       } else if (request_start > request_end) {
+               if (acthd >= request_start || acthd < request_end)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool i915_request_guilty(struct drm_i915_gem_request *request,
+                               const u32 acthd, bool *inside)
+{
+       /* There is a possibility that the unmasked head address,
+        * pointing inside the ring, matches the batch_obj address range.
+        * However, this is extremely unlikely.
+        */
+
+       if (request->batch_obj) {
+               if (i915_head_inside_object(acthd, request->batch_obj)) {
+                       *inside = true;
+                       return true;
+               }
+       }
+
+       if (i915_head_inside_request(acthd, request->head, request->tail)) {
+               *inside = false;
+               return true;
+       }
+
+       return false;
+}
+
+static void i915_set_reset_status(struct intel_ring_buffer *ring,
+                                 struct drm_i915_gem_request *request,
+                                 u32 acthd)
+{
+       struct i915_ctx_hang_stats *hs = NULL;
+       bool inside, guilty;
+
+       /* Innocent until proven guilty */
+       guilty = false;
+
+       if (ring->hangcheck.action != wait &&
+           i915_request_guilty(request, acthd, &inside)) {
+               DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
+                         ring->name,
+                         inside ? "inside" : "flushing",
+                         request->batch_obj ?
+                         i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
+                         request->ctx ? request->ctx->id : 0,
+                         acthd);
+
+               guilty = true;
+       }
+
+       /* If contexts are disabled or this is the default context, use
+        * file_priv->reset_state
+        */
+       if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
+               hs = &request->ctx->hang_stats;
+       else if (request->file_priv)
+               hs = &request->file_priv->hang_stats;
+
+       if (hs) {
+               if (guilty)
+                       hs->batch_active++;
+               else
+                       hs->batch_pending++;
+       }
+}
+
+static void i915_gem_free_request(struct drm_i915_gem_request *request)
+{
+       list_del(&request->list);
+       i915_gem_request_remove_from_client(request);
+
+       if (request->ctx)
+               i915_gem_context_unreference(request->ctx);
+
+       kfree(request);
+}
+
 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                      struct intel_ring_buffer *ring)
 {
+       u32 completed_seqno;
+       u32 acthd;
+
+       acthd = intel_ring_get_active_head(ring);
+       completed_seqno = ring->get_seqno(ring, false);
+
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -2101,9 +2226,10 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                           struct drm_i915_gem_request,
                                           list);
 
-               list_del(&request->list);
-               i915_gem_request_remove_from_client(request);
-               kfree(request);
+               if (request->seqno > completed_seqno)
+                       i915_set_reset_status(ring, request, acthd);
+
+               i915_gem_free_request(request);
        }
 
        while (!list_empty(&ring->active_list)) {
@@ -2141,6 +2267,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
 void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct intel_ring_buffer *ring;
        int i;
@@ -2151,12 +2278,8 @@ void i915_gem_reset(struct drm_device *dev)
        /* Move everything out of the GPU domains to ensure we do any
         * necessary invalidation upon reuse.
         */
-       list_for_each_entry(obj,
-                           &dev_priv->mm.inactive_list,
-                           mm_list)
-       {
+       list_for_each_entry(obj, &vm->inactive_list, mm_list)
                obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-       }
 
        /* The fence registers are invalidated so clear them out */
        i915_gem_reset_fences(dev);
@@ -2195,9 +2318,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                 */
                ring->last_retired_head = request->tail;
 
-               list_del(&request->list);
-               i915_gem_request_remove_from_client(request);
-               kfree(request);
+               i915_gem_free_request(request);
        }
 
        /* Move any buffers on the active list that are no longer referenced
@@ -2264,12 +2385,12 @@ i915_gem_retire_work_handler(struct work_struct *work)
        idle = true;
        for_each_ring(ring, dev_priv, i) {
                if (ring->gpu_caches_dirty)
-                       i915_add_request(ring, NULL, NULL);
+                       i915_add_request(ring, NULL);
 
                idle &= list_empty(&ring->request_list);
        }
 
-       if (!dev_priv->mm.suspended && !idle)
+       if (!dev_priv->ums.mm_suspended && !idle)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                                   round_jiffies_up_relative(HZ));
        if (idle)
@@ -2462,9 +2583,10 @@ int
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+       struct i915_vma *vma;
        int ret;
 
-       if (obj->gtt_space == NULL)
+       if (!i915_gem_obj_ggtt_bound(obj))
                return 0;
 
        if (obj->pin_count)
@@ -2496,15 +2618,23 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
                obj->has_aliasing_ppgtt_mapping = 0;
        }
        i915_gem_gtt_finish_object(obj);
+       i915_gem_object_unpin_pages(obj);
 
        list_del(&obj->mm_list);
-       list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
        /* Avoid an unnecessary call to unbind on rebind. */
        obj->map_and_fenceable = true;
 
-       drm_mm_put_block(obj->gtt_space);
-       obj->gtt_space = NULL;
-       obj->gtt_offset = 0;
+       vma = __i915_gem_obj_to_vma(obj);
+       list_del(&vma->vma_link);
+       drm_mm_remove_node(&vma->node);
+       i915_gem_vma_destroy(vma);
+
+       /* Since the unbound list is global, only move to that list if
+        * no more VMAs exist.
+        * NB: Until we have real VMAs there will only ever be one */
+       WARN_ON(!list_empty(&obj->vma_list));
+       if (list_empty(&obj->vma_list))
+               list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
        return 0;
 }
@@ -2546,11 +2676,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
        }
 
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
 
-               val = (uint64_t)((obj->gtt_offset + size - 4096) &
+               val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
                                 0xfffff000) << 32;
-               val |= obj->gtt_offset & 0xfffff000;
+               val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
                val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2570,15 +2700,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
        u32 val;
 
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                int pitch_val;
                int tile_width;
 
-               WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
                     (size & -size) != size ||
-                    (obj->gtt_offset & (size - 1)),
-                    "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-                    obj->gtt_offset, obj->map_and_fenceable, size);
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
 
                if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
                        tile_width = 128;
@@ -2589,7 +2719,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
                pitch_val = obj->stride / tile_width;
                pitch_val = ffs(pitch_val) - 1;
 
-               val = obj->gtt_offset;
+               val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I915_FENCE_SIZE_BITS(size);
@@ -2614,19 +2744,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
        uint32_t val;
 
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                uint32_t pitch_val;
 
-               WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
                     (size & -size) != size ||
-                    (obj->gtt_offset & (size - 1)),
-                    "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
-                    obj->gtt_offset, size);
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), size);
 
                pitch_val = obj->stride / 128;
                pitch_val = ffs(pitch_val) - 1;
 
-               val = obj->gtt_offset;
+               val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I830_FENCE_SIZE_BITS(size);
@@ -2678,18 +2808,33 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
        return fence - dev_priv->fence_regs;
 }
 
+struct write_fence {
+       struct drm_device *dev;
+       struct drm_i915_gem_object *obj;
+       int fence;
+};
+
 static void i915_gem_write_fence__ipi(void *data)
 {
+       struct write_fence *args = data;
+
+       /* Required for SNB+ with LLC */
        wbinvd();
+
+       /* Required for VLV */
+       i915_gem_write_fence(args->dev, args->fence, args->obj);
 }
 
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int fence_reg = fence_number(dev_priv, fence);
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct write_fence args = {
+               .dev = obj->base.dev,
+               .fence = fence_number(dev_priv, fence),
+               .obj = enable ? obj : NULL,
+       };
 
        /* In order to fully serialize access to the fenced region and
         * the update to the fence register we need to take extreme
@@ -2700,13 +2845,19 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
         * SNB+ we need to take a step further and emit an explicit wbinvd()
         * on each processor in order to manually flush all memory
         * transactions before updating the fence register.
+        *
+        * However, Valleyview complicates matters. There the wbinvd is
+        * insufficient and unlike SNB/IVB requires the serialising
+        * register write. (Note that the register write by itself is
+        * conversely not sufficient for SNB+.) To compromise, we do both.
         */
-       if (HAS_LLC(obj->base.dev))
-               on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
-       i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
+       if (INTEL_INFO(args.dev)->gen >= 6)
+               on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
+       else
+               i915_gem_write_fence(args.dev, args.fence, args.obj);
 
        if (enable) {
-               obj->fence_reg = fence_reg;
+               obj->fence_reg = args.fence;
                fence->obj = obj;
                list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
        } else {
@@ -2861,7 +3012,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
        if (HAS_LLC(dev))
                return true;
 
-       if (gtt_space == NULL)
+       if (!drm_mm_node_allocated(gtt_space))
                return true;
 
        if (list_empty(&gtt_space->node_list))
@@ -2885,7 +3036,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
        struct drm_i915_gem_object *obj;
        int err = 0;
 
-       list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+       list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
                if (obj->gtt_space == NULL) {
                        printk(KERN_ERR "object found on GTT list with no space reserved\n");
                        err++;
@@ -2894,8 +3045,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
 
                if (obj->cache_level != obj->gtt_space->color) {
                        printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-                              obj->gtt_space->start,
-                              obj->gtt_space->start + obj->gtt_space->size,
+                              i915_gem_obj_ggtt_offset(obj),
+                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level,
                               obj->gtt_space->color);
                        err++;
@@ -2906,8 +3057,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
                                              obj->gtt_space,
                                              obj->cache_level)) {
                        printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-                              obj->gtt_space->start,
-                              obj->gtt_space->start + obj->gtt_space->size,
+                              i915_gem_obj_ggtt_offset(obj),
+                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level);
                        err++;
                        continue;
@@ -2929,11 +3080,17 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_mm_node *node;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
+       size_t gtt_max = map_and_fenceable ?
+               dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+       struct i915_vma *vma;
        int ret;
 
+       if (WARN_ON(!list_empty(&obj->vma_list)))
+               return -EBUSY;
+
        fence_size = i915_gem_get_gtt_size(dev,
                                           obj->base.size,
                                           obj->tiling_mode);
@@ -2958,9 +3115,11 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
-       if (obj->base.size >
-           (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
-               DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+       if (obj->base.size > gtt_max) {
+               DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+                         obj->base.size,
+                         map_and_fenceable ? "mappable" : "total",
+                         gtt_max);
                return -E2BIG;
        }
 
@@ -2970,20 +3129,17 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (node == NULL) {
-               i915_gem_object_unpin_pages(obj);
-               return -ENOMEM;
+       vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_unpin;
        }
 
- search_free:
-       if (map_and_fenceable)
-               ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
-                                                         size, alignment, obj->cache_level,
-                                                         0, dev_priv->gtt.mappable_end);
-       else
-               ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
-                                                size, alignment, obj->cache_level);
+search_free:
+       ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+                                                 &vma->node,
+                                                 size, alignment,
+                                                 obj->cache_level, 0, gtt_max);
        if (ret) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
@@ -2992,42 +3148,42 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                if (ret == 0)
                        goto search_free;
 
-               i915_gem_object_unpin_pages(obj);
-               kfree(node);
-               return ret;
+               goto err_free_vma;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
-               i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(node);
-               return -EINVAL;
+       if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
+                                             obj->cache_level))) {
+               ret = -EINVAL;
+               goto err_remove_node;
        }
 
        ret = i915_gem_gtt_prepare_object(obj);
-       if (ret) {
-               i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(node);
-               return ret;
-       }
-
-       list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       if (ret)
+               goto err_remove_node;
 
-       obj->gtt_space = node;
-       obj->gtt_offset = node->start;
+       list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
+       list_add_tail(&obj->mm_list, &vm->inactive_list);
+       list_add(&vma->vma_link, &obj->vma_list);
 
        fenceable =
-               node->size == fence_size &&
-               (node->start & (fence_alignment - 1)) == 0;
+               i915_gem_obj_ggtt_size(obj) == fence_size &&
+               (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
 
-       mappable =
-               obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+       mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
+               dev_priv->gtt.mappable_end;
 
        obj->map_and_fenceable = mappable && fenceable;
 
-       i915_gem_object_unpin_pages(obj);
        trace_i915_gem_object_bind(obj, map_and_fenceable);
        i915_gem_verify_gtt(dev);
        return 0;
+
+err_remove_node:
+       drm_mm_remove_node(&vma->node);
+err_free_vma:
+       i915_gem_vma_destroy(vma);
+err_unpin:
+       i915_gem_object_unpin_pages(obj);
+       return ret;
 }
 
 void
@@ -3123,7 +3279,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        int ret;
 
        /* Not valid to be called on unbound objects. */
-       if (obj->gtt_space == NULL)
+       if (!i915_gem_obj_ggtt_bound(obj))
                return -EINVAL;
 
        if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3162,7 +3318,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
        /* And bump the LRU for this access */
        if (i915_gem_object_is_inactive(obj))
-               list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+               list_move_tail(&obj->mm_list,
+                              &dev_priv->gtt.base.inactive_list);
 
        return 0;
 }
@@ -3172,6 +3329,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
        int ret;
 
        if (obj->cache_level == cache_level)
@@ -3182,13 +3340,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                return -EBUSY;
        }
 
-       if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+       if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
                ret = i915_gem_object_unbind(obj);
                if (ret)
                        return ret;
        }
 
-       if (obj->gtt_space) {
+       if (i915_gem_obj_ggtt_bound(obj)) {
                ret = i915_gem_object_finish_gpu(obj);
                if (ret)
                        return ret;
@@ -3211,7 +3369,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                                               obj, cache_level);
 
-               obj->gtt_space->color = cache_level;
+               i915_gem_obj_ggtt_set_color(obj, cache_level);
        }
 
        if (cache_level == I915_CACHE_NONE) {
@@ -3492,14 +3650,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
        if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                return -EBUSY;
 
-       if (obj->gtt_space != NULL) {
-               if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+       if (i915_gem_obj_ggtt_bound(obj)) {
+               if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
                    (map_and_fenceable && !obj->map_and_fenceable)) {
                        WARN(obj->pin_count,
                             "bo is already pinned with incorrect alignment:"
-                            " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+                            " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
-                            obj->gtt_offset, alignment,
+                            i915_gem_obj_ggtt_offset(obj), alignment,
                             map_and_fenceable,
                             obj->map_and_fenceable);
                        ret = i915_gem_object_unbind(obj);
@@ -3508,7 +3666,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                }
        }
 
-       if (obj->gtt_space == NULL) {
+       if (!i915_gem_obj_ggtt_bound(obj)) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
                ret = i915_gem_object_bind_to_gtt(obj, alignment,
@@ -3534,7 +3692,7 @@ void
 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
        BUG_ON(obj->pin_count == 0);
-       BUG_ON(obj->gtt_space == NULL);
+       BUG_ON(!i915_gem_obj_ggtt_bound(obj));
 
        if (--obj->pin_count == 0)
                obj->pin_mappable = false;
@@ -3584,7 +3742,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
         * as the X server doesn't manage domains yet
         */
        i915_gem_object_flush_cpu_write_domain(obj);
-       args->offset = obj->gtt_offset;
+       args->offset = i915_gem_obj_ggtt_offset(obj);
 out:
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -3724,9 +3882,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
 {
        INIT_LIST_HEAD(&obj->mm_list);
-       INIT_LIST_HEAD(&obj->gtt_list);
+       INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
+       INIT_LIST_HEAD(&obj->vma_list);
 
        obj->ops = ops;
 
@@ -3791,6 +3950,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        } else
                obj->cache_level = I915_CACHE_NONE;
 
+       trace_i915_gem_object_create(obj);
+
        return obj;
 }
 
@@ -3824,7 +3985,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                dev_priv->mm.interruptible = was_interruptible;
        }
 
-       obj->pages_pin_count = 0;
+       /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
+        * before progressing. */
+       if (obj->stolen)
+               i915_gem_object_unpin_pages(obj);
+
+       if (WARN_ON(obj->pages_pin_count))
+               obj->pages_pin_count = 0;
        i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);
        i915_gem_object_release_stolen(obj);
@@ -3841,15 +4008,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        i915_gem_object_free(obj);
 }
 
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+                                    struct i915_address_space *vm)
+{
+       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+       if (vma == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&vma->vma_link);
+       vma->vm = vm;
+       vma->obj = obj;
+
+       return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+       WARN_ON(vma->node.allocated);
+       kfree(vma);
+}
+
 int
 i915_gem_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
-       mutex_lock(&dev->struct_mutex);
-
-       if (dev_priv->mm.suspended) {
+       if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -3867,18 +4052,11 @@ i915_gem_idle(struct drm_device *dev)
 
        i915_gem_reset_fences(dev);
 
-       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
-        * We need to replace this with a semaphore, or something.
-        * And not confound mm.suspended!
-        */
-       dev_priv->mm.suspended = 1;
        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 
        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
 
-       mutex_unlock(&dev->struct_mutex);
-
        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
@@ -3977,12 +4155,21 @@ static int i915_gem_init_rings(struct drm_device *dev)
                        goto cleanup_bsd_ring;
        }
 
+       if (HAS_VEBOX(dev)) {
+               ret = intel_init_vebox_ring_buffer(dev);
+               if (ret)
+                       goto cleanup_blt_ring;
+       }
+
+
        ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
        if (ret)
-               goto cleanup_blt_ring;
+               goto cleanup_vebox_ring;
 
        return 0;
 
+cleanup_vebox_ring:
+       intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
 cleanup_bsd_ring:
@@ -4002,8 +4189,8 @@ i915_gem_init_hw(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
 
-       if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
-               I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+       if (dev_priv->ellc_size)
+               I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
        if (HAS_PCH_NOP(dev)) {
                u32 temp = I915_READ(GEN7_MSG_CTL);
@@ -4079,7 +4266,7 @@ int
 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4091,7 +4278,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        }
 
        mutex_lock(&dev->struct_mutex);
-       dev_priv->mm.suspended = 0;
+       dev_priv->ums.mm_suspended = 0;
 
        ret = i915_gem_init_hw(dev);
        if (ret != 0) {
@@ -4099,7 +4286,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                return ret;
        }
 
-       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
        mutex_unlock(&dev->struct_mutex);
 
        ret = drm_irq_install(dev);
@@ -4111,7 +4298,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 cleanup_ringbuffer:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
-       dev_priv->mm.suspended = 1;
+       dev_priv->ums.mm_suspended = 1;
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
@@ -4121,11 +4308,26 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
        drm_irq_uninstall(dev);
-       return i915_gem_idle(dev);
+
+       mutex_lock(&dev->struct_mutex);
+       ret =  i915_gem_idle(dev);
+
+       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
+        * We need to replace this with a semaphore, or something.
+        * And not confound ums.mm_suspended!
+        */
+       if (ret != 0)
+               dev_priv->ums.mm_suspended = 1;
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
 }
 
 void
@@ -4136,9 +4338,11 @@ i915_gem_lastclose(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
 
+       mutex_lock(&dev->struct_mutex);
        ret = i915_gem_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
+       mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -4160,8 +4364,8 @@ i915_gem_load(struct drm_device *dev)
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
 
-       INIT_LIST_HEAD(&dev_priv->mm.active_list);
-       INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
+       INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4431,6 +4635,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        int nr_to_scan = sc->nr_to_scan;
        bool unlock = true;
@@ -4456,10 +4661,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        }
 
        cnt = 0;
-       list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
-       list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+       list_for_each_entry(obj, &vm->inactive_list, global_list)
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;