Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad55b06..28f91df 100644
@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
 static int i915_gem_shrinker_oom(struct notifier_block *nb,
                                 unsigned long event,
                                 void *ptr);
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1085,7 +1084,13 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
                if (i915_terminally_wedged(error))
                        return -EIO;
 
-               return -EAGAIN;
+               /*
+                * Check if GPU Reset is in progress - we need intel_ring_begin
+                * to work properly to reinit the hw state while the gpu is
+                * still marked as reset-in-progress. Handle this with a flag.
+                */
+               if (!error->reload_in_reset)
+                       return -EAGAIN;
        }
 
        return 0;
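
[editor's note] The error->reload_in_reset flag tested above is introduced by this series so that intel_ring_begin() can keep working while a reset is still marked as in progress. A minimal sketch of the assumed usage in the reset path (placement inside i915_reset() is an assumption, not verified against this tree):

	/* Sketch: the reset path sets the flag around hardware re-init. */
	dev_priv->gpu_error.reload_in_reset = true;
	ret = i915_gem_init_hw(dev);	/* may call intel_ring_begin() */
	dev_priv->gpu_error.reload_in_reset = false;
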
@@ -1735,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
         * offsets on purgeable objects by truncating it and marking it purged,
         * which prevents userspace from ever using that object again.
         */
-       i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+       i915_gem_shrink(dev_priv,
+                       obj->base.size >> PAGE_SHIFT,
+                       I915_SHRINK_BOUND |
+                       I915_SHRINK_UNBOUND |
+                       I915_SHRINK_PURGEABLE);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
                goto out;
@@ -1932,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-static unsigned long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
-                 bool purgeable_only)
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+               long target, unsigned flags)
 {
-       struct list_head still_in_list;
-       struct drm_i915_gem_object *obj;
+       const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
        unsigned long count = 0;
 
        /*
@@ -1959,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count of 0.
         */
-       INIT_LIST_HEAD(&still_in_list);
-       while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-               obj = list_first_entry(&dev_priv->mm.unbound_list,
-                                      typeof(*obj), global_list);
-               list_move_tail(&obj->global_list, &still_in_list);
+       if (flags & I915_SHRINK_UNBOUND) {
+               struct list_head still_in_list;
 
-               if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                       continue;
+               INIT_LIST_HEAD(&still_in_list);
+               while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+                       struct drm_i915_gem_object *obj;
 
-               drm_gem_object_reference(&obj->base);
+                       obj = list_first_entry(&dev_priv->mm.unbound_list,
+                                              typeof(*obj), global_list);
+                       list_move_tail(&obj->global_list, &still_in_list);
 
-               if (i915_gem_object_put_pages(obj) == 0)
-                       count += obj->base.size >> PAGE_SHIFT;
+                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                               continue;
 
-               drm_gem_object_unreference(&obj->base);
+                       drm_gem_object_reference(&obj->base);
+
+                       if (i915_gem_object_put_pages(obj) == 0)
+                               count += obj->base.size >> PAGE_SHIFT;
+
+                       drm_gem_object_unreference(&obj->base);
+               }
+               list_splice(&still_in_list, &dev_priv->mm.unbound_list);
        }
-       list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 
-       INIT_LIST_HEAD(&still_in_list);
-       while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
-               struct i915_vma *vma, *v;
+       if (flags & I915_SHRINK_BOUND) {
+               struct list_head still_in_list;
 
-               obj = list_first_entry(&dev_priv->mm.bound_list,
-                                      typeof(*obj), global_list);
-               list_move_tail(&obj->global_list, &still_in_list);
+               INIT_LIST_HEAD(&still_in_list);
+               while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+                       struct drm_i915_gem_object *obj;
+                       struct i915_vma *vma, *v;
 
-               if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                       continue;
+                       obj = list_first_entry(&dev_priv->mm.bound_list,
+                                              typeof(*obj), global_list);
+                       list_move_tail(&obj->global_list, &still_in_list);
 
-               drm_gem_object_reference(&obj->base);
+                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                               continue;
 
-               list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
-                       if (i915_vma_unbind(vma))
-                               break;
+                       drm_gem_object_reference(&obj->base);
 
-               if (i915_gem_object_put_pages(obj) == 0)
-                       count += obj->base.size >> PAGE_SHIFT;
+                       list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                               if (i915_vma_unbind(vma))
+                                       break;
 
-               drm_gem_object_unreference(&obj->base);
+                       if (i915_gem_object_put_pages(obj) == 0)
+                               count += obj->base.size >> PAGE_SHIFT;
+
+                       drm_gem_object_unreference(&obj->base);
+               }
+               list_splice(&still_in_list, &dev_priv->mm.bound_list);
        }
-       list_splice(&still_in_list, &dev_priv->mm.bound_list);
 
        return count;
 }
 
-static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
-       return __i915_gem_shrink(dev_priv, target, true);
-}
-
 static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
        i915_gem_evict_everything(dev_priv->dev);
-       return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+       return i915_gem_shrink(dev_priv, LONG_MAX,
+                              I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 }
 
 static int
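
[editor's note] This refactor folds i915_gem_purge() and __i915_gem_shrink() into a single exported i915_gem_shrink() driven by a flags word. The flags are assumed to be declared in i915_drv.h next to the new prototype; a sketch (the exact bit values are an assumption):

	unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
				      long target, unsigned flags);
	#define I915_SHRINK_PURGEABLE 0x1 /* only purgeable objects */
	#define I915_SHRINK_UNBOUND   0x2 /* scan mm.unbound_list */
	#define I915_SHRINK_BOUND     0x4 /* scan mm.bound_list, unbinding VMAs */

Callers compose passes from these bits; the shrinker hunk near the end of this patch runs a PURGEABLE-only pass first and falls back to a full BOUND|UNBOUND pass.
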
@@ -2061,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        for (i = 0; i < page_count; i++) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                if (IS_ERR(page)) {
-                       i915_gem_purge(dev_priv, page_count);
+                       i915_gem_shrink(dev_priv,
+                                       page_count,
+                                       I915_SHRINK_BOUND |
+                                       I915_SHRINK_UNBOUND |
+                                       I915_SHRINK_PURGEABLE);
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                }
                if (IS_ERR(page)) {
@@ -2163,8 +2181,6 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 seqno = intel_ring_get_seqno(ring);
 
        BUG_ON(ring == NULL);
@@ -2183,19 +2199,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        list_move_tail(&obj->ring_list, &ring->active_list);
 
        obj->last_read_seqno = seqno;
-
-       if (obj->fenced_gpu_access) {
-               obj->last_fenced_seqno = seqno;
-
-               /* Bump MRU to take account of the delayed flush */
-               if (obj->fence_reg != I915_FENCE_REG_NONE) {
-                       struct drm_i915_fence_reg *reg;
-
-                       reg = &dev_priv->fence_regs[obj->fence_reg];
-                       list_move_tail(&reg->lru_list,
-                                      &dev_priv->mm.fence_list);
-               }
-       }
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2231,7 +2234,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
        obj->base.write_domain = 0;
 
        obj->last_fenced_seqno = 0;
-       obj->fenced_gpu_access = false;
 
        obj->active = 0;
        drm_gem_object_unreference(&obj->base);
@@ -2329,10 +2331,21 @@ int __i915_add_request(struct intel_engine_cs *ring,
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
+       struct intel_ringbuffer *ringbuf;
        u32 request_ring_position, request_start;
        int ret;
 
-       request_start = intel_ring_get_tail(ring->buffer);
+       request = ring->preallocated_lazy_request;
+       if (WARN_ON(request == NULL))
+               return -ENOMEM;
+
+       if (i915.enable_execlists) {
+               struct intel_context *ctx = request->ctx;
+               ringbuf = ctx->engine[ring->id].ringbuf;
+       } else
+               ringbuf = ring->buffer;
+
+       request_start = intel_ring_get_tail(ringbuf);
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
@@ -2340,24 +2353,32 @@ int __i915_add_request(struct intel_engine_cs *ring,
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
-       ret = intel_ring_flush_all_caches(ring);
-       if (ret)
-               return ret;
-
-       request = ring->preallocated_lazy_request;
-       if (WARN_ON(request == NULL))
-               return -ENOMEM;
+       if (i915.enable_execlists) {
+               ret = logical_ring_flush_all_caches(ringbuf);
+               if (ret)
+                       return ret;
+       } else {
+               ret = intel_ring_flush_all_caches(ring);
+               if (ret)
+                       return ret;
+       }
 
        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       request_ring_position = intel_ring_get_tail(ring->buffer);
+       request_ring_position = intel_ring_get_tail(ringbuf);
 
-       ret = ring->add_request(ring);
-       if (ret)
-               return ret;
+       if (i915.enable_execlists) {
+               ret = ring->emit_request(ringbuf);
+               if (ret)
+                       return ret;
+       } else {
+               ret = ring->add_request(ring);
+               if (ret)
+                       return ret;
+       }
 
        request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
@@ -2372,12 +2393,14 @@ int __i915_add_request(struct intel_engine_cs *ring,
         */
        request->batch_obj = obj;
 
-       /* Hold a reference to the current context so that we can inspect
-        * it later in case a hangcheck error event fires.
-        */
-       request->ctx = ring->last_context;
-       if (request->ctx)
-               i915_gem_context_reference(request->ctx);
+       if (!i915.enable_execlists) {
+               /* Hold a reference to the current context so that we can inspect
+                * it later in case a hangcheck error event fires.
+                */
+               request->ctx = ring->last_context;
+               if (request->ctx)
+                       i915_gem_context_reference(request->ctx);
+       }
 
        request->emitted_jiffies = jiffies;
        list_add_tail(&request->list, &ring->request_list);
@@ -2548,6 +2571,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                i915_gem_free_request(request);
        }
 
+       while (!list_empty(&ring->execlist_queue)) {
+               struct intel_ctx_submit_request *submit_req;
+
+               submit_req = list_first_entry(&ring->execlist_queue,
+                               struct intel_ctx_submit_request,
+                               execlist_link);
+               list_del(&submit_req->execlist_link);
+               intel_runtime_pm_put(dev_priv);
+               i915_gem_context_unreference(submit_req->ctx);
+               kfree(submit_req);
+       }
+
        /* These may not have been flushed before the reset, do so now */
        kfree(ring->preallocated_lazy_request);
        ring->preallocated_lazy_request = NULL;
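
[editor's note] For context, intel_ctx_submit_request is the execlists submission bookkeeping drained above; its layout in intel_lrc.h of this era is assumed to be roughly:

	struct intel_ctx_submit_request {
		struct intel_context *ctx;	/* holds the per-engine ringbuf */
		struct intel_engine_cs *ring;
		u32 tail;
		struct list_head execlist_link;	/* link in ring->execlist_queue */
		int elsp_submitted;
	};

Each queued entry holds a runtime-pm reference and a context reference, which is why the cleanup loop above drops both before freeing.
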
@@ -2632,6 +2667,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
+               struct intel_ringbuffer *ringbuf;
 
                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
@@ -2641,12 +2677,24 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                        break;
 
                trace_i915_gem_request_retire(ring, request->seqno);
+
+               /* This is one of the few common intersection points
+                * between legacy ringbuffer submission and execlists:
+                * we need to tell them apart in order to find the correct
+                * ringbuffer to which the request belongs.
+                */
+               if (i915.enable_execlists) {
+                       struct intel_context *ctx = request->ctx;
+                       ringbuf = ctx->engine[ring->id].ringbuf;
+               } else
+                       ringbuf = ring->buffer;
+
                /* We know the GPU must have read the request to have
                 * sent us the seqno + interrupt, so use the position
                 * of the tail of the request to update the last known position
                 * of the GPU head.
                 */
-               ring->buffer->last_retired_head = request->tail;
+               ringbuf->last_retired_head = request->tail;
 
                i915_gem_free_request(request);
        }
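
[editor's note] The execlists-vs-legacy ringbuffer lookup above duplicates the logic added to __i915_add_request() earlier in this patch. A hypothetical helper capturing the shared pattern (request_ringbuf() is illustrative only, not part of the tree):

	/* Hypothetical helper: find the ringbuffer a request was emitted to. */
	static struct intel_ringbuffer *
	request_ringbuf(struct intel_engine_cs *ring,
			struct drm_i915_gem_request *request)
	{
		if (i915.enable_execlists)
			return request->ctx->engine[ring->id].ringbuf;
		return ring->buffer;
	}
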
@@ -2908,6 +2956,9 @@ int i915_vma_unbind(struct i915_vma *vma)
         * cause memory corruption through use-after-free.
         */
 
+       /* Throw away the active reference before moving to the unbound list */
+       i915_gem_object_retire(obj);
+
        if (i915_is_ggtt(vma->vm)) {
                i915_gem_object_finish_gtt(obj);
 
@@ -2922,9 +2973,8 @@ int i915_vma_unbind(struct i915_vma *vma)
        vma->unbind_vma(vma);
 
        list_del_init(&vma->mm_list);
-       /* Avoid an unnecessary call to unbind on rebind. */
        if (i915_is_ggtt(vma->vm))
-               obj->map_and_fenceable = true;
+               obj->map_and_fenceable = false;
 
        drm_mm_remove_node(&vma->node);
        i915_gem_vma_destroy(vma);
@@ -2953,9 +3003,11 @@ int i915_gpu_idle(struct drm_device *dev)
 
        /* Flush everything onto the inactive list. */
        for_each_ring(ring, dev_priv, i) {
-               ret = i915_switch_context(ring, ring->default_context);
-               if (ret)
-                       return ret;
+               if (!i915.enable_execlists) {
+                       ret = i915_switch_context(ring, ring->default_context);
+                       if (ret)
+                               return ret;
+               }
 
                ret = intel_ring_idle(ring);
                if (ret)
@@ -3169,7 +3221,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
                obj->last_fenced_seqno = 0;
        }
 
-       obj->fenced_gpu_access = false;
        return 0;
 }
 
@@ -3276,6 +3327,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
                        return 0;
                }
        } else if (enable) {
+               if (WARN_ON(!obj->map_and_fenceable))
+                       return -EINVAL;
+
                reg = i915_find_fence_reg(dev);
                if (IS_ERR(reg))
                        return PTR_ERR(reg);
@@ -3297,17 +3351,20 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-static bool i915_gem_valid_gtt_space(struct drm_device *dev,
-                                    struct drm_mm_node *gtt_space,
+static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
                                     unsigned long cache_level)
 {
+       struct drm_mm_node *gtt_space = &vma->node;
        struct drm_mm_node *other;
 
-       /* On non-LLC machines we have to be careful when putting differing
-        * types of snoopable memory together to avoid the prefetcher
-        * crossing memory domains and dying.
+       /*
+        * On some machines we have to be careful when putting differing types
+        * of snoopable memory together to avoid the prefetcher crossing memory
+        * domains and dying. During vm initialisation, we decide whether or not
+        * these constraints apply and set the drm_mm.color_adjust
+        * appropriately.
         */
-       if (HAS_LLC(dev))
+       if (vma->vm->mm.color_adjust == NULL)
                return true;
 
        if (!drm_mm_node_allocated(gtt_space))
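
[editor's note] The vma->vm->mm.color_adjust check above works because the hook is only installed where snoop-domain constraints actually apply. A sketch of the assumed setup during global GTT initialisation on non-LLC hardware (the callback itself lives in i915_gem_gtt.c):

	/* Sketch: drm_mm colouring is enabled only on machines that need it. */
	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
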
@@ -3445,8 +3502,7 @@ search_free:
 
                goto err_free_vma;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
-                                             obj->cache_level))) {
+       if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
                ret = -EINVAL;
                goto err_remove_node;
        }
@@ -3586,11 +3642,12 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
        /* Not valid to be called on unbound objects. */
-       if (!i915_gem_obj_bound_any(obj))
+       if (vma == NULL)
                return -EINVAL;
 
        if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3632,13 +3689,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
                                            old_write_domain);
 
        /* And bump the LRU for this access */
-       if (i915_gem_object_is_inactive(obj)) {
-               struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
-               if (vma)
-                       list_move_tail(&vma->mm_list,
-                                      &dev_priv->gtt.base.inactive_list);
-
-       }
+       if (i915_gem_object_is_inactive(obj))
+               list_move_tail(&vma->mm_list,
+                              &dev_priv->gtt.base.inactive_list);
 
        return 0;
 }
@@ -3659,7 +3712,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
        }
 
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-               if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+               if (!i915_gem_valid_gtt_space(vma, cache_level)) {
                        ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;
@@ -3802,9 +3855,6 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
 
-       if (list_empty(&obj->vma_list))
-               return false;
-
        vma = i915_gem_obj_to_ggtt(obj);
        if (!vma)
                return false;
@@ -4331,8 +4381,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
        obj->fence_reg = I915_FENCE_REG_NONE;
        obj->madv = I915_MADV_WILLNEED;
-       /* Avoid an unnecessary call to unbind on the first bind. */
-       obj->map_and_fenceable = true;
 
        i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
@@ -4493,12 +4541,18 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
+       struct i915_address_space *vm = NULL;
        WARN_ON(vma->node.allocated);
 
        /* Keep the vma as a placeholder in the execbuffer reservation lists */
        if (!list_empty(&vma->exec_list))
                return;
 
+       vm = vma->vm;
+
+       if (!i915_is_ggtt(vm))
+               i915_ppgtt_put(i915_vm_to_ppgtt(vm));
+
        list_del(&vma->vma_link);
 
        kfree(vma);
@@ -4512,7 +4566,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
        int i;
 
        for_each_ring(ring, dev_priv, i)
-               intel_stop_ring_buffer(ring);
+               dev_priv->gt.stop_ring(ring);
 }
 
 int
@@ -4629,11 +4683,46 @@ intel_enable_blt(struct drm_device *dev)
        return true;
 }
 
-static int i915_gem_init_rings(struct drm_device *dev)
+static void init_unused_ring(struct drm_device *dev, u32 base)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(RING_CTL(base), 0);
+       I915_WRITE(RING_HEAD(base), 0);
+       I915_WRITE(RING_TAIL(base), 0);
+       I915_WRITE(RING_START(base), 0);
+}
+
+static void init_unused_rings(struct drm_device *dev)
+{
+       if (IS_I830(dev)) {
+               init_unused_ring(dev, PRB1_BASE);
+               init_unused_ring(dev, SRB0_BASE);
+               init_unused_ring(dev, SRB1_BASE);
+               init_unused_ring(dev, SRB2_BASE);
+               init_unused_ring(dev, SRB3_BASE);
+       } else if (IS_GEN2(dev)) {
+               init_unused_ring(dev, SRB0_BASE);
+               init_unused_ring(dev, SRB1_BASE);
+       } else if (IS_GEN3(dev)) {
+               init_unused_ring(dev, PRB1_BASE);
+               init_unused_ring(dev, PRB2_BASE);
+       }
+}
+
+int i915_gem_init_rings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       /*
+        * At least 830 can leave some of the unused rings
+        * "active" (ie. head != tail) after resume which
+        * will prevent c3 entry. Make sure all unused rings
+        * are totally idle.
+        */
+       init_unused_rings(dev);
+
        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;
@@ -4712,7 +4801,7 @@ i915_gem_init_hw(struct drm_device *dev)
 
        i915_gem_init_swizzling(dev);
 
-       ret = i915_gem_init_rings(dev);
+       ret = dev_priv->gt.init_rings(dev);
        if (ret)
                return ret;
 
@@ -4730,6 +4819,14 @@ i915_gem_init_hw(struct drm_device *dev)
        if (ret && ret != -EIO) {
                DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
+
+               return ret;
+       }
+
+       ret = i915_ppgtt_init_hw(dev);
+       if (ret && ret != -EIO) {
+               DRM_ERROR("PPGTT enable failed %d\n", ret);
+               i915_gem_cleanup_ringbuffer(dev);
        }
 
        return ret;
@@ -4740,6 +4837,9 @@ int i915_gem_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       i915.enable_execlists = intel_sanitize_enable_execlists(dev,
+                       i915.enable_execlists);
+
        mutex_lock(&dev->struct_mutex);
 
        if (IS_VALLEYVIEW(dev)) {
@@ -4750,7 +4850,24 @@ int i915_gem_init(struct drm_device *dev)
                        DRM_DEBUG_DRIVER("allow wake ack timed out\n");
        }
 
-       i915_gem_init_userptr(dev);
+       if (!i915.enable_execlists) {
+               dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+               dev_priv->gt.init_rings = i915_gem_init_rings;
+               dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
+               dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+       } else {
+               dev_priv->gt.do_execbuf = intel_execlists_submission;
+               dev_priv->gt.init_rings = intel_logical_rings_init;
+               dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
+               dev_priv->gt.stop_ring = intel_logical_ring_stop;
+       }
+
+       ret = i915_gem_init_userptr(dev);
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
        i915_gem_init_global_gtt(dev);
 
        ret = i915_gem_context_init(dev);
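
[editor's note] The dev_priv->gt function table populated above abstracts the submission backend (legacy ringbuffer vs. execlists). Its declaration in i915_drv.h is assumed to look roughly like this (signatures inferred from the assignments in this hunk):

	/* Sketch: submission vtable in struct drm_i915_private. */
	struct {
		int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct intel_context *ctx,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct list_head *vmas,
				  struct drm_i915_gem_object *batch_obj,
				  u64 exec_start, u32 flags);
		int (*init_rings)(struct drm_device *dev);
		void (*cleanup_ring)(struct intel_engine_cs *ring);
		void (*stop_ring)(struct intel_engine_cs *ring);
	} gt;
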
@@ -4785,7 +4902,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
        int i;
 
        for_each_ring(ring, dev_priv, i)
-               intel_cleanup_ring_buffer(ring);
+               dev_priv->gt.cleanup_ring(ring);
 }
 
 int
@@ -5097,9 +5214,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       if (!dev_priv->mm.aliasing_ppgtt ||
-           vm == &dev_priv->mm.aliasing_ppgtt->base)
-               vm = &dev_priv->gtt.base;
+       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
        list_for_each_entry(vma, &o->vma_list, vma_link) {
                if (vma->vm == vm)
@@ -5140,9 +5255,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       if (!dev_priv->mm.aliasing_ppgtt ||
-           vm == &dev_priv->mm.aliasing_ppgtt->base)
-               vm = &dev_priv->gtt.base;
+       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
        BUG_ON(list_empty(&o->vma_list));
 
@@ -5165,11 +5278,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        if (!i915_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;
 
-       freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+       freed = i915_gem_shrink(dev_priv,
+                               sc->nr_to_scan,
+                               I915_SHRINK_BOUND |
+                               I915_SHRINK_UNBOUND |
+                               I915_SHRINK_PURGEABLE);
        if (freed < sc->nr_to_scan)
-               freed += __i915_gem_shrink(dev_priv,
-                                          sc->nr_to_scan - freed,
-                                          false);
+               freed += i915_gem_shrink(dev_priv,
+                                        sc->nr_to_scan - freed,
+                                        I915_SHRINK_BOUND |
+                                        I915_SHRINK_UNBOUND);
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
 
@@ -5247,14 +5365,8 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
 
-       /* This WARN has probably outlived its usefulness (callers already
-        * WARN if they don't find the GGTT vma they expect). When removing,
-        * remember to remove the pre-check in is_pin_display() as well */
-       if (WARN_ON(list_empty(&obj->vma_list)))
-               return NULL;
-
        vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-       if (vma->vm != obj_to_ggtt(obj))
+       if (vma->vm != i915_obj_to_ggtt(obj))
                return NULL;
 
        return vma;