Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm...
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dabc089..12407bc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include "intel_mocs.h"
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
-#define RQ_BUG_ON(expr)
-
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static void
@@ -85,9 +84,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
        int ret;
 
-#define EXIT_COND (!i915_reset_in_progress(error) || \
-                  i915_terminally_wedged(error))
-       if (EXIT_COND)
+       if (!i915_reset_in_progress(error))
                return 0;
 
        /*
@@ -96,17 +93,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
-                                              EXIT_COND,
+                                              !i915_reset_in_progress(error),
                                               10*HZ);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
+       } else {
+               return 0;
        }
-#undef EXIT_COND
-
-       return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
@@ -130,9 +126,9 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
-       struct i915_gtt *ggtt = &dev_priv->gtt;
        struct i915_vma *vma;
        size_t pinned;
 
@@ -146,7 +142,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);
 
-       args->aper_size = dev_priv->gtt.base.total;
+       args->aper_size = ggtt->base.total;
        args->aper_available_size = args->aper_size - pinned;
 
        return 0;
@@ -181,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                vaddr += PAGE_SIZE;
        }
 
-       i915_gem_chipset_flush(obj->base.dev);
+       i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
@@ -211,11 +207,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
        BUG_ON(obj->madv == __I915_MADV_PURGED);
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
-       if (ret) {
+       if (WARN_ON(ret)) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
-               WARN_ON(ret != -EIO);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
@@ -324,7 +319,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
-       char __user *user_data = to_user_ptr(args->data_ptr);
+       char __user *user_data = u64_to_user_ptr(args->data_ptr);
        int ret = 0;
 
        /* We manually control the domain here and pretend that it
@@ -352,7 +347,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        }
 
        drm_clflush_virt_range(vaddr, args->size);
-       i915_gem_chipset_flush(dev);
+       i915_gem_chipset_flush(to_i915(dev));
 
 out:
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -386,9 +381,9 @@ i915_gem_create(struct drm_file *file,
                return -EINVAL;
 
        /* Allocate the new object */
-       obj = i915_gem_alloc_object(dev, size);
-       if (obj == NULL)
-               return -ENOMEM;
+       obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
@@ -605,7 +600,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;
 
-       user_data = to_user_ptr(args->data_ptr);
+       user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
 
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -692,7 +687,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                return 0;
 
        if (!access_ok(VERIFY_WRITE,
-                      to_user_ptr(args->data_ptr),
+                      u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;
 
@@ -700,7 +695,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@ -765,7 +760,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
@@ -783,7 +779,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        if (ret)
                goto out_unpin;
 
-       user_data = to_user_ptr(args->data_ptr);
+       user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
 
        offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
@@ -807,7 +803,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
-               if (fast_user_write(dev_priv->gtt.mappable, page_base,
+               if (fast_user_write(ggtt->mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
                        goto out_flush;
@@ -907,7 +903,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
        int needs_clflush_before = 0;
        struct sg_page_iter sg_iter;
 
-       user_data = to_user_ptr(args->data_ptr);
+       user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
 
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -1010,7 +1006,7 @@ out:
        }
 
        if (needs_clflush_after)
-               i915_gem_chipset_flush(dev);
+               i915_gem_chipset_flush(to_i915(dev));
        else
                obj->cache_dirty = true;
 
@@ -1036,12 +1032,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                return 0;
 
        if (!access_ok(VERIFY_READ,
-                      to_user_ptr(args->data_ptr),
+                      u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;
 
        if (likely(!i915.prefault_disable)) {
-               ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+               ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
                                                   args->size);
                if (ret)
                        return -EFAULT;
@@ -1053,7 +1049,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto put_rpm;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@ -1109,27 +1105,19 @@ put_rpm:
        return ret;
 }
 
-int
-i915_gem_check_wedge(struct i915_gpu_error *error,
-                    bool interruptible)
+static int
+i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
 {
-       if (i915_reset_in_progress(error)) {
+       if (__i915_terminally_wedged(reset_counter))
+               return -EIO;
+
+       if (__i915_reset_in_progress(reset_counter)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these. */
                if (!interruptible)
                        return -EIO;
 
-               /* Recovery complete, but the reset failed ... */
-               if (i915_terminally_wedged(error))
-                       return -EIO;
-
-               /*
-                * Check if GPU Reset is in progress - we need intel_ring_begin
-                * to work properly to reinit the hw state while the gpu is
-                * still marked as reset-in-progress. Handle this with a flag.
-                */
-               if (!error->reload_in_reset)
-                       return -EAGAIN;
+               return -EAGAIN;
        }
 
        return 0;
@@ -1141,9 +1129,9 @@ static void fake_irq(unsigned long data)
 }
 
 static bool missed_irq(struct drm_i915_private *dev_priv,
-                      struct intel_engine_cs *ring)
+                      struct intel_engine_cs *engine)
 {
-       return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+       return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
 static unsigned long local_clock_us(unsigned *cpu)
@@ -1193,7 +1181,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
         * takes to sleep on a request, on the order of a microsecond.
         */
 
-       if (req->ring->irq_refcount)
+       if (req->engine->irq_refcount)
                return -EBUSY;
 
        /* Only spin if we know the GPU is processing this request */
@@ -1223,7 +1211,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 /**
  * __i915_wait_request - wait until execution of request has finished
  * @req: duh!
- * @reset_counter: reset sequence associated with the given request
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
@@ -1238,16 +1225,14 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
  * errno with remaining time filled in timeout argument.
  */
 int __i915_wait_request(struct drm_i915_gem_request *req,
-                       unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
                        struct intel_rps_client *rps)
 {
-       struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
+       struct drm_i915_private *dev_priv = req->i915;
        const bool irq_test_in_progress =
-               ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+               ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(wait);
        unsigned long timeout_expire;
@@ -1288,7 +1273,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
        if (ret == 0)
                goto out;
 
-       if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+       if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
                ret = -ENODEV;
                goto out;
        }
@@ -1296,16 +1281,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
        for (;;) {
                struct timer_list timer;
 
-               prepare_to_wait(&ring->irq_queue, &wait, state);
+               prepare_to_wait(&engine->irq_queue, &wait, state);
 
                /* We need to check whether any gpu reset happened in between
-                * the caller grabbing the seqno and now ... */
-               if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
-                       /* ... but upgrade the -EAGAIN to an -EIO if the gpu
-                        * is truely gone. */
-                       ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-                       if (ret == 0)
-                               ret = -EAGAIN;
+                * the request being submitted and now. If a reset has occurred,
+                * the request is effectively complete (we either are in the
+                * process of or have discarded the rendering and completely
+                * reset the GPU). The results of the request are lost and we
+                * are free to continue on with the original operation.
+                */
+               if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
+                       ret = 0;
                        break;
                }
 
@@ -1325,11 +1311,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                }
 
                timer.function = NULL;
-               if (timeout || missed_irq(dev_priv, ring)) {
+               if (timeout || missed_irq(dev_priv, engine)) {
                        unsigned long expire;
 
                        setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-                       expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+                       expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
                        mod_timer(&timer, expire);
                }
 
@@ -1341,9 +1327,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                }
        }
        if (!irq_test_in_progress)
-               ring->irq_put(ring);
+               engine->irq_put(engine);
 
-       finish_wait(&ring->irq_queue, &wait);
+       finish_wait(&engine->irq_queue, &wait);
 
 out:
        trace_i915_gem_request_wait_end(req);
@@ -1370,7 +1356,6 @@ out:
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
 {
-       struct drm_i915_private *dev_private;
        struct drm_i915_file_private *file_priv;
 
        WARN_ON(!req || !file || req->file_priv);
@@ -1381,7 +1366,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
        if (req->file_priv)
                return -EINVAL;
 
-       dev_private = req->ring->dev->dev_private;
        file_priv = file->driver_priv;
 
        spin_lock(&file_priv->mm.lock);
@@ -1428,16 +1412,23 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
        list_del_init(&request->list);
        i915_gem_request_remove_from_client(request);
 
+       if (request->previous_context) {
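+               /* In execlists mode the previous context is kept pinned until
+                * this request is retired, to be sure the GPU has finished
+                * switching away from it; it is now safe to drop that pin.
+                */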
+               if (i915.enable_execlists)
+                       intel_lr_context_unpin(request->previous_context,
+                                              request->engine);
+       }
+
+       i915_gem_context_unreference(request->ctx);
        i915_gem_request_unreference(request);
 }
 
 static void
 __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;
 
-       lockdep_assert_held(&engine->dev->struct_mutex);
+       lockdep_assert_held(&engine->i915->dev->struct_mutex);
 
        if (list_empty(&req->list))
                return;
@@ -1459,30 +1450,22 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
-       struct drm_device *dev;
-       struct drm_i915_private *dev_priv;
+       struct drm_i915_private *dev_priv = req->i915;
        bool interruptible;
        int ret;
 
-       BUG_ON(req == NULL);
-
-       dev = req->ring->dev;
-       dev_priv = dev->dev_private;
        interruptible = dev_priv->mm.interruptible;
 
-       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+       BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+       ret = __i915_wait_request(req, interruptible, NULL, NULL);
        if (ret)
                return ret;
 
-       ret = __i915_wait_request(req,
-                                 atomic_read(&dev_priv->gpu_error.reset_counter),
-                                 interruptible, NULL, NULL);
-       if (ret)
-               return ret;
+       /* If the GPU hung, we want to keep the requests to find the guilty. */
+       if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+               __i915_gem_request_retire__upto(req);
 
-       __i915_gem_request_retire__upto(req);
        return 0;
 }
 
@@ -1505,14 +1488,14 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                        if (ret)
                                return ret;
 
-                       i = obj->last_write_req->ring->id;
+                       i = obj->last_write_req->engine->id;
                        if (obj->last_read_req[i] == obj->last_write_req)
                                i915_gem_object_retire__read(obj, i);
                        else
                                i915_gem_object_retire__write(obj);
                }
        } else {
-               for (i = 0; i < I915_NUM_RINGS; i++) {
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
                        if (obj->last_read_req[i] == NULL)
                                continue;
 
@@ -1522,7 +1505,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 
                        i915_gem_object_retire__read(obj, i);
                }
-               RQ_BUG_ON(obj->active);
+               GEM_BUG_ON(obj->active);
        }
 
        return 0;
@@ -1532,14 +1515,15 @@ static void
 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
                               struct drm_i915_gem_request *req)
 {
-       int ring = req->ring->id;
+       int ring = req->engine->id;
 
        if (obj->last_read_req[ring] == req)
                i915_gem_object_retire__read(obj, ring);
        else if (obj->last_write_req == req)
                i915_gem_object_retire__write(obj);
 
-       __i915_gem_request_retire__upto(req);
+       if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+               __i915_gem_request_retire__upto(req);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1552,8 +1536,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_request *requests[I915_NUM_RINGS];
-       unsigned reset_counter;
+       struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
        int ret, i, n = 0;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1562,12 +1545,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        if (!obj->active)
                return 0;
 
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
-       if (ret)
-               return ret;
-
-       reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-
        if (readonly) {
                struct drm_i915_gem_request *req;
 
@@ -1577,7 +1554,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
                requests[n++] = i915_gem_request_reference(req);
        } else {
-               for (i = 0; i < I915_NUM_RINGS; i++) {
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
                        struct drm_i915_gem_request *req;
 
                        req = obj->last_read_req[i];
@@ -1589,9 +1566,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        }
 
        mutex_unlock(&dev->struct_mutex);
+       ret = 0;
        for (i = 0; ret == 0 && i < n; i++)
-               ret = __i915_wait_request(requests[i], reset_counter, true,
-                                         NULL, rps);
+               ret = __i915_wait_request(requests[i], true, NULL, rps);
        mutex_lock(&dev->struct_mutex);
 
        for (i = 0; i < n; i++) {
@@ -1640,7 +1617,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@ -1688,7 +1665,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@ -1732,10 +1709,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        if (args->flags & ~(I915_MMAP_WC))
                return -EINVAL;
 
-       if (args->flags & I915_MMAP_WC && !cpu_has_pat)
+       if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
                return -ENODEV;
 
-       obj = drm_gem_object_lookup(dev, file, args->handle);
+       obj = drm_gem_object_lookup(file, args->handle);
        if (obj == NULL)
                return -ENOENT;
 
@@ -1754,7 +1731,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;
 
-               down_write(&mm->mmap_sem);
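+               /* Waiting for mmap_sem may be interrupted by a fatal signal;
+                * in that case back out, dropping our object reference, and
+                * report -EINTR so that userspace can retry.
+                */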
+               if (down_write_killable(&mm->mmap_sem)) {
+                       drm_gem_object_unreference_unlocked(obj);
+                       return -EINTR;
+               }
                vma = find_vma(mm, addr);
                if (vma)
                        vma->vm_page_prot =
@@ -1792,7 +1772,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_ggtt_view view = i915_ggtt_view_normal;
        pgoff_t page_offset;
        unsigned long pfn;
@@ -1827,7 +1808,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        /* Use a partial view if the object is bigger than the aperture. */
-       if (obj->base.size >= dev_priv->gtt.mappable_end &&
+       if (obj->base.size >= ggtt->mappable_end &&
            obj->tiling_mode == I915_TILING_NONE) {
                static const unsigned int chunk_size = 256; // 1 MiB
 
@@ -1855,7 +1836,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                goto unpin;
 
        /* Finally, remap it using the new GTT offset */
-       pfn = dev_priv->gtt.mappable_base +
+       pfn = ggtt->mappable_base +
                i915_gem_obj_ggtt_offset_view(obj, &view);
        pfn >>= PAGE_SHIFT;
 
@@ -1964,11 +1945,27 @@ out:
 void
 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
+       /* Serialisation between user GTT access and our code depends upon
+        * revoking the CPU's PTE whilst the mutex is held. The next user
+        * pagefault then has to wait until we release the mutex.
+        */
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
        if (!obj->fault_mappable)
                return;
 
        drm_vma_node_unmap(&obj->base.vma_node,
                           obj->base.dev->anon_inode->i_mapping);
+
+       /* Ensure that the CPU's PTEs are revoked and there are no outstanding
+        * memory transactions from userspace before we return. The TLB
+        * flushing implied by changing the PTEs above *should* be
+        * sufficient, but an extra barrier here provides us with a bit
+        * of paranoid documentation about our requirement to serialise
+        * memory writes before touching registers / GSM.
+        */
+       wmb();
+
        obj->fault_mappable = false;
 }
 
@@ -1991,7 +1988,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
                return size;
 
        /* Previous chips need a power-of-two fence region when tiling */
-       if (INTEL_INFO(dev)->gen == 3)
+       if (IS_GEN3(dev))
                gtt_size = 1024*1024;
        else
                gtt_size = 512*1024;
@@ -2033,9 +2030,6 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;
 
-       if (drm_vma_node_has_offset(&obj->base.vma_node))
-               return 0;
-
        dev_priv->mm.shrinker_no_lock_stealing = true;
 
        ret = drm_gem_create_mmap_offset(&obj->base);
@@ -2084,7 +2078,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file, handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@ -2174,17 +2168,17 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       struct page *page;
        int ret;
 
        BUG_ON(obj->madv == __I915_MADV_PURGED);
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
-       if (ret) {
+       if (WARN_ON(ret)) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
-               WARN_ON(ret != -EIO);
                i915_gem_clflush_object(obj, true);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
@@ -2197,9 +2191,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;
 
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
-
+       for_each_sgt_page(page, sgt_iter, obj->pages) {
                if (obj->dirty)
                        set_page_dirty(page);
 
@@ -2232,6 +2224,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
         * lists early. */
        list_del(&obj->global_list);
 
+       if (obj->mapping) {
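+               /* Tear down the kernel mapping created by
+                * i915_gem_object_pin_map(): a single page is kmapped,
+                * anything larger is vmapped.
+                */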
+               if (is_vmalloc_addr(obj->mapping))
+                       vunmap(obj->mapping);
+               else
+                       kunmap(kmap_to_page(obj->mapping));
+               obj->mapping = NULL;
+       }
+
        ops->put_pages(obj);
        obj->pages = NULL;
 
@@ -2248,7 +2248,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        int ret;
@@ -2345,8 +2345,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
 err_pages:
        sg_mark_end(sg);
-       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-               put_page(sg_page_iter_page(&sg_iter));
+       for_each_sgt_page(page, sgt_iter, st)
+               put_page(page);
        sg_free_table(st);
        kfree(st);
 
@@ -2400,21 +2400,82 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+       unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+       struct sg_table *sgt = obj->pages;
+       struct sgt_iter sgt_iter;
+       struct page *page;
+       struct page *stack_pages[32];
+       struct page **pages = stack_pages;
+       unsigned long i = 0;
+       void *addr;
+
+       /* A single page can always be kmapped */
+       if (n_pages == 1)
+               return kmap(sg_page(sgt->sgl));
+
+       if (n_pages > ARRAY_SIZE(stack_pages)) {
+               /* Too big for stack -- allocate temporary array instead */
+               pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+               if (!pages)
+                       return NULL;
+       }
+
+       for_each_sgt_page(page, sgt_iter, sgt)
+               pages[i++] = page;
+
+       /* Check that we have the expected number of pages */
+       GEM_BUG_ON(i != n_pages);
+
+       addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+       if (pages != stack_pages)
+               drm_free_large(pages);
+
+       return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+{
+       int ret;
+
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               return ERR_PTR(ret);
+
+       i915_gem_object_pin_pages(obj);
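+       /* The kernel mapping is created lazily on first use and cached on
+        * the object; it is only torn down when the object's backing pages
+        * are finally released.
+        */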
+
+       if (!obj->mapping) {
+               obj->mapping = i915_gem_object_map(obj);
+               if (!obj->mapping) {
+                       i915_gem_object_unpin_pages(obj);
+                       return ERR_PTR(-ENOMEM);
+               }
+       }
+
+       return obj->mapping;
+}
+
 void i915_vma_move_to_active(struct i915_vma *vma,
                             struct drm_i915_gem_request *req)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct intel_engine_cs *ring;
+       struct intel_engine_cs *engine;
 
-       ring = i915_gem_request_get_ring(req);
+       engine = i915_gem_request_get_engine(req);
 
        /* Add a reference if we're newly entering the active list. */
        if (obj->active == 0)
                drm_gem_object_reference(&obj->base);
-       obj->active |= intel_ring_flag(ring);
+       obj->active |= intel_engine_flag(engine);
 
-       list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-       i915_gem_request_assign(&obj->last_read_req[ring->id], req);
+       list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
+       i915_gem_request_assign(&obj->last_read_req[engine->id], req);
 
        list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
@@ -2422,8 +2483,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
-       RQ_BUG_ON(obj->last_write_req == NULL);
-       RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+       GEM_BUG_ON(obj->last_write_req == NULL);
+       GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
 
        i915_gem_request_assign(&obj->last_write_req, NULL);
        intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2434,13 +2495,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 {
        struct i915_vma *vma;
 
-       RQ_BUG_ON(obj->last_read_req[ring] == NULL);
-       RQ_BUG_ON(!(obj->active & (1 << ring)));
+       GEM_BUG_ON(obj->last_read_req[ring] == NULL);
+       GEM_BUG_ON(!(obj->active & (1 << ring)));
 
-       list_del_init(&obj->ring_list[ring]);
+       list_del_init(&obj->engine_list[ring]);
        i915_gem_request_assign(&obj->last_read_req[ring], NULL);
 
-       if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+       if (obj->last_write_req && obj->last_write_req->engine->id == ring)
                i915_gem_object_retire__write(obj);
 
        obj->active &= ~(1 << ring);
@@ -2464,27 +2525,22 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 }
 
 static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       int ret, i, j;
+       struct intel_engine_cs *engine;
+       int ret;
 
        /* Carefully retire all requests without writing to the rings */
-       for_each_ring(ring, dev_priv, i) {
-               ret = intel_ring_idle(ring);
+       for_each_engine(engine, dev_priv) {
+               ret = intel_engine_idle(engine);
                if (ret)
                        return ret;
        }
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev_priv);
 
        /* Finally reset hw state */
-       for_each_ring(ring, dev_priv, i) {
-               intel_ring_init_seqno(ring, seqno);
-
-               for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
-                       ring->semaphore.sync_seqno[j] = 0;
-       }
+       for_each_engine(engine, dev_priv)
+               intel_ring_init_seqno(engine, seqno);
 
        return 0;
 }
@@ -2500,7 +2556,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
        /* HWS page needs to be set less than what we
         * will inject to ring
         */
-       ret = i915_gem_init_seqno(dev, seqno - 1);
+       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;
 
@@ -2516,13 +2572,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 }
 
 int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        /* reserve 0 for non-seqno */
        if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_init_seqno(dev, 0);
+               int ret = i915_gem_init_seqno(dev_priv, 0);
                if (ret)
                        return ret;
 
@@ -2542,17 +2596,18 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                        struct drm_i915_gem_object *obj,
                        bool flush_caches)
 {
-       struct intel_engine_cs *ring;
+       struct intel_engine_cs *engine;
        struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
+       u32 reserved_tail;
        int ret;
 
        if (WARN_ON(request == NULL))
                return;
 
-       ring = request->ring;
-       dev_priv = ring->dev->dev_private;
+       engine = request->engine;
+       dev_priv = request->i915;
        ringbuf = request->ringbuf;
 
        /*
@@ -2560,9 +2615,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
-       intel_ring_reserved_space_use(ringbuf);
-
        request_start = intel_ring_get_tail(ringbuf);
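+       /* Note how much ring space was reserved for emitting this request,
+        * so that the estimate can be checked once the request has been
+        * emitted, and release the reservation.
+        */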
+       reserved_tail = request->reserved_space;
+       request->reserved_space = 0;
+
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
@@ -2579,6 +2635,28 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
        }
 
+       trace_i915_gem_request_add(request);
+
+       request->head = request_start;
+
+       /* Whilst this request exists, batch_obj will be on the
+        * active_list, and so will hold the active reference. Only when this
+        * request is retired will the batch_obj be moved onto the
+        * inactive_list and lose its active reference. Hence we do not need
+        * to explicitly hold another reference here.
+        */
+       request->batch_obj = obj;
+
+       /* Seal the request and mark it as pending execution. Note that
+        * we may inspect this state, without holding any locks, during
+        * hangcheck. Hence we apply the barrier to ensure that we do not
+        * see a more recent value in the hws than we are tracking.
+        */
+       request->emitted_jiffies = jiffies;
+       request->previous_seqno = engine->last_submitted_seqno;
+       smp_store_mb(engine->last_submitted_seqno, request->seqno);
+       list_add_tail(&request->list, &engine->request_list);
+
        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
@@ -2587,41 +2665,30 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        request->postfix = intel_ring_get_tail(ringbuf);
 
        if (i915.enable_execlists)
-               ret = ring->emit_request(request);
+               ret = engine->emit_request(request);
        else {
-               ret = ring->add_request(request);
+               ret = engine->add_request(request);
 
                request->tail = intel_ring_get_tail(ringbuf);
        }
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
 
-       request->head = request_start;
-
-       /* Whilst this request exists, batch_obj will be on the
-        * active_list, and so will hold the active reference. Only when this
-        * request is retired will the the batch_obj be moved onto the
-        * inactive_list and lose its active reference. Hence we do not need
-        * to explicitly hold another reference here.
-        */
-       request->batch_obj = obj;
-
-       request->emitted_jiffies = jiffies;
-       request->previous_seqno = ring->last_submitted_seqno;
-       ring->last_submitted_seqno = request->seqno;
-       list_add_tail(&request->list, &ring->request_list);
-
-       trace_i915_gem_request_add(request);
-
-       i915_queue_hangcheck(ring->dev);
+       i915_queue_hangcheck(engine->i915);
 
        queue_delayed_work(dev_priv->wq,
                           &dev_priv->mm.retire_work,
                           round_jiffies_up_relative(HZ));
-       intel_mark_busy(dev_priv->dev);
+       intel_mark_busy(dev_priv);
 
        /* Sanity check that the reserved size was large enough. */
-       intel_ring_reserved_space_end(ringbuf);
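+       /* The tail may have wrapped past the start of the request, so
+        * normalise the distance before comparing it with the reservation.
+        */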
+       ret = intel_ring_get_tail(ringbuf) - request_start;
+       if (ret < 0)
+               ret += ringbuf->size;
+       WARN_ONCE(ret > reserved_tail,
+                 "Not enough space reserved (%d bytes) "
+                 "for adding the request (%d bytes)\n",
+                 reserved_tail, ret);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2673,27 +2740,16 @@ void i915_gem_request_free(struct kref *req_ref)
 {
        struct drm_i915_gem_request *req = container_of(req_ref,
                                                 typeof(*req), ref);
-       struct intel_context *ctx = req->ctx;
-
-       if (req->file_priv)
-               i915_gem_request_remove_from_client(req);
-
-       if (ctx) {
-               if (i915.enable_execlists && ctx != req->i915->kernel_context)
-                       intel_lr_context_unpin(ctx, req->ring);
-
-               i915_gem_context_unreference(ctx);
-       }
-
        kmem_cache_free(req->i915->requests, req);
 }
 
 static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *ring,
+__i915_gem_request_alloc(struct intel_engine_cs *engine,
                         struct intel_context *ctx,
                         struct drm_i915_gem_request **req_out)
 {
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
+       unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        int ret;
 
@@ -2702,29 +2758,29 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
 
        *req_out = NULL;
 
+       /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+        * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+        * and restart.
+        */
+       ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+       if (ret)
+               return ret;
+
        req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
        if (req == NULL)
                return -ENOMEM;
 
-       ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+       ret = i915_gem_get_seqno(engine->i915, &req->seqno);
        if (ret)
                goto err;
 
        kref_init(&req->ref);
        req->i915 = dev_priv;
-       req->ring = ring;
+       req->engine = engine;
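+       /* Store the reset epoch sampled above so that waiters can later
+        * detect that a GPU reset completed this request for them.
+        */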
+       req->reset_counter = reset_counter;
        req->ctx  = ctx;
        i915_gem_context_reference(req->ctx);
 
-       if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(req);
-       else
-               ret = intel_ring_alloc_request_extras(req);
-       if (ret) {
-               i915_gem_context_unreference(req->ctx);
-               goto err;
-       }
-
        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
@@ -2732,23 +2788,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
+       req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
        if (i915.enable_execlists)
-               ret = intel_logical_ring_reserve_space(req);
+               ret = intel_logical_ring_alloc_request_extras(req);
        else
-               ret = intel_ring_reserve_space(req);
-       if (ret) {
-               /*
-                * At this point, the request is fully allocated even if not
-                * fully prepared. Thus it can be cleaned up using the proper
-                * free code.
-                */
-               i915_gem_request_cancel(req);
-               return ret;
-       }
+               ret = intel_ring_alloc_request_extras(req);
+       if (ret)
+               goto err_ctx;
 
        *req_out = req;
        return 0;
 
+err_ctx:
+       i915_gem_context_unreference(ctx);
 err:
        kmem_cache_free(dev_priv->requests, req);
        return ret;
@@ -2774,24 +2827,17 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        int err;
 
        if (ctx == NULL)
-               ctx = to_i915(engine->dev)->kernel_context;
+               ctx = engine->i915->kernel_context;
        err = __i915_gem_request_alloc(engine, ctx, &req);
        return err ? ERR_PTR(err) : req;
 }
 
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
-       intel_ring_reserved_space_cancel(req->ringbuf);
-
-       i915_gem_request_unreference(req);
-}
-
 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring)
+i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
 
-       list_for_each_entry(request, &ring->request_list, list) {
+       list_for_each_entry(request, &engine->request_list, list) {
                if (i915_gem_request_completed(request, false))
                        continue;
 
@@ -2801,38 +2847,38 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
        return NULL;
 }
 
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-                                      struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
+                                      struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
        bool ring_hung;
 
-       request = i915_gem_find_active_request(ring);
+       request = i915_gem_find_active_request(engine);
 
        if (request == NULL)
                return;
 
-       ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+       ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
        i915_set_reset_status(dev_priv, request->ctx, ring_hung);
 
-       list_for_each_entry_continue(request, &ring->request_list, list)
+       list_for_each_entry_continue(request, &engine->request_list, list)
                i915_set_reset_status(dev_priv, request->ctx, false);
 }
 
-static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
-                                       struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
+                                       struct intel_engine_cs *engine)
 {
        struct intel_ringbuffer *buffer;
 
-       while (!list_empty(&ring->active_list)) {
+       while (!list_empty(&engine->active_list)) {
                struct drm_i915_gem_object *obj;
 
-               obj = list_first_entry(&ring->active_list,
+               obj = list_first_entry(&engine->active_list,
                                       struct drm_i915_gem_object,
-                                      ring_list[ring->id]);
+                                      engine_list[engine->id]);
 
-               i915_gem_object_retire__read(obj, ring->id);
+               i915_gem_object_retire__read(obj, engine->id);
        }
 
        /*
@@ -2842,14 +2888,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
         */
 
        if (i915.enable_execlists) {
-               spin_lock_irq(&ring->execlist_lock);
-
-               /* list_splice_tail_init checks for empty lists */
-               list_splice_tail_init(&ring->execlist_queue,
-                                     &ring->execlist_retired_req_list);
+               /* Ensure irq handler finishes or is cancelled. */
+               tasklet_kill(&engine->irq_tasklet);
 
-               spin_unlock_irq(&ring->execlist_lock);
-               intel_execlists_retire_requests(ring);
+               intel_execlists_cancel_requests(engine);
        }
 
        /*
@@ -2859,10 +2901,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
         * implicit references on things like e.g. ppgtt address spaces through
         * the request.
         */
-       while (!list_empty(&ring->request_list)) {
+       while (!list_empty(&engine->request_list)) {
                struct drm_i915_gem_request *request;
 
-               request = list_first_entry(&ring->request_list,
+               request = list_first_entry(&engine->request_list,
                                           struct drm_i915_gem_request,
                                           list);
 
@@ -2876,28 +2918,29 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
         * upon reset is less than when we start. Do one more pass over
         * all the ringbuffers to reset last_retired_head.
         */
-       list_for_each_entry(buffer, &ring->buffers, link) {
+       list_for_each_entry(buffer, &engine->buffers, link) {
                buffer->last_retired_head = buffer->tail;
                intel_ring_update_space(buffer);
        }
+
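+       /* Mark every request submitted before the reset as complete by
+        * setting the hardware seqno to the last submitted value.
+        */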
+       intel_ring_init_seqno(engine, engine->last_submitted_seqno);
 }
 
 void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       int i;
+       struct intel_engine_cs *engine;
 
        /*
         * Before we free the objects from the requests, we need to inspect
         * them for finding the guilty party. As the requests only borrow
         * their reference to the objects, the inspection must be done first.
         */
-       for_each_ring(ring, dev_priv, i)
-               i915_gem_reset_ring_status(dev_priv, ring);
+       for_each_engine(engine, dev_priv)
+               i915_gem_reset_engine_status(dev_priv, engine);
 
-       for_each_ring(ring, dev_priv, i)
-               i915_gem_reset_ring_cleanup(dev_priv, ring);
+       for_each_engine(engine, dev_priv)
+               i915_gem_reset_engine_cleanup(dev_priv, engine);
 
        i915_gem_context_reset(dev);
 
@@ -2910,19 +2953,19 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 {
-       WARN_ON(i915_verify_lists(ring->dev));
+       WARN_ON(i915_verify_lists(engine->dev));
 
        /* Retire requests first as we use it above for the early return.
         * If we retire requests last, we may use a later seqno and so clear
         * the requests lists without clearing the active list, leading to
         * confusion.
         */
-       while (!list_empty(&ring->request_list)) {
+       while (!list_empty(&engine->request_list)) {
                struct drm_i915_gem_request *request;
 
-               request = list_first_entry(&ring->request_list,
+               request = list_first_entry(&engine->request_list,
                                           struct drm_i915_gem_request,
                                           list);
 
@@ -2936,45 +2979,41 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
         * by the ringbuffer to the flushing/inactive lists as appropriate,
         * before we free the context associated with the requests.
         */
-       while (!list_empty(&ring->active_list)) {
+       while (!list_empty(&engine->active_list)) {
                struct drm_i915_gem_object *obj;
 
-               obj = list_first_entry(&ring->active_list,
-                                     struct drm_i915_gem_object,
-                                     ring_list[ring->id]);
+               obj = list_first_entry(&engine->active_list,
+                                      struct drm_i915_gem_object,
+                                      engine_list[engine->id]);
 
-               if (!list_empty(&obj->last_read_req[ring->id]->list))
+               if (!list_empty(&obj->last_read_req[engine->id]->list))
                        break;
 
-               i915_gem_object_retire__read(obj, ring->id);
+               i915_gem_object_retire__read(obj, engine->id);
        }
 
-       if (unlikely(ring->trace_irq_req &&
-                    i915_gem_request_completed(ring->trace_irq_req, true))) {
-               ring->irq_put(ring);
-               i915_gem_request_assign(&ring->trace_irq_req, NULL);
+       if (unlikely(engine->trace_irq_req &&
+                    i915_gem_request_completed(engine->trace_irq_req, true))) {
+               engine->irq_put(engine);
+               i915_gem_request_assign(&engine->trace_irq_req, NULL);
        }
 
-       WARN_ON(i915_verify_lists(ring->dev));
+       WARN_ON(i915_verify_lists(engine->dev));
 }
 
 bool
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
+       struct intel_engine_cs *engine;
        bool idle = true;
-       int i;
 
-       for_each_ring(ring, dev_priv, i) {
-               i915_gem_retire_requests_ring(ring);
-               idle &= list_empty(&ring->request_list);
+       for_each_engine(engine, dev_priv) {
+               i915_gem_retire_requests_ring(engine);
+               idle &= list_empty(&engine->request_list);
                if (i915.enable_execlists) {
-                       spin_lock_irq(&ring->execlist_lock);
-                       idle &= list_empty(&ring->execlist_queue);
-                       spin_unlock_irq(&ring->execlist_lock);
-
-                       intel_execlists_retire_requests(ring);
+                       spin_lock_bh(&engine->execlist_lock);
+                       idle &= list_empty(&engine->execlist_queue);
+                       spin_unlock_bh(&engine->execlist_lock);
                }
        }
 
@@ -2997,7 +3036,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
        /* Come back later if the device is busy... */
        idle = false;
        if (mutex_trylock(&dev->struct_mutex)) {
-               idle = i915_gem_retire_requests(dev);
+               idle = i915_gem_retire_requests(dev_priv);
                mutex_unlock(&dev->struct_mutex);
        }
        if (!idle)
@@ -3011,25 +3050,21 @@ i915_gem_idle_work_handler(struct work_struct *work)
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), mm.idle_work.work);
        struct drm_device *dev = dev_priv->dev;
-       struct intel_engine_cs *ring;
-       int i;
+       struct intel_engine_cs *engine;
 
-       for_each_ring(ring, dev_priv, i)
-               if (!list_empty(&ring->request_list))
+       for_each_engine(engine, dev_priv)
+               if (!list_empty(&engine->request_list))
                        return;
 
        /* we probably should sync with hangcheck here, using cancel_work_sync.
-        * Also locking seems to be fubar here, ring->request_list is protected
+        * Also locking seems to be fubar here, engine->request_list is protected
         * by dev->struct_mutex. */
 
-       intel_mark_idle(dev);
+       intel_mark_idle(dev_priv);
 
        if (mutex_trylock(&dev->struct_mutex)) {
-               struct intel_engine_cs *ring;
-               int i;
-
-               for_each_ring(ring, dev_priv, i)
-                       i915_gem_batch_pool_fini(&ring->batch_pool);
+               for_each_engine(engine, dev_priv)
+                       i915_gem_batch_pool_fini(&engine->batch_pool);
 
                mutex_unlock(&dev->struct_mutex);
        }
@@ -3048,21 +3083,15 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
        if (!obj->active)
                return 0;
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
+       for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct drm_i915_gem_request *req;
 
                req = obj->last_read_req[i];
                if (req == NULL)
                        continue;
 
-               if (list_empty(&req->list))
-                       goto retire;
-
-               if (i915_gem_request_completed(req, true)) {
-                       __i915_gem_request_retire__upto(req);
-retire:
+               if (i915_gem_request_completed(req, true))
                        i915_gem_object_retire__read(obj, i);
-               }
        }
 
        return 0;
@@ -3093,11 +3122,9 @@ retire:
 int
 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
-       struct drm_i915_gem_request *req[I915_NUM_RINGS];
-       unsigned reset_counter;
+       struct drm_i915_gem_request *req[I915_NUM_ENGINES];
        int i, n = 0;
        int ret;
 
@@ -3108,7 +3135,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
        if (&obj->base == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
@@ -3131,9 +3158,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        }
 
        drm_gem_object_unreference(&obj->base);
-       reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
+       for (i = 0; i < I915_NUM_ENGINES; i++) {
                if (obj->last_read_req[i] == NULL)
                        continue;
 
@@ -3144,10 +3170,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
        for (i = 0; i < n; i++) {
                if (ret == 0)
-                       ret = __i915_wait_request(req[i], reset_counter, true,
+                       ret = __i915_wait_request(req[i], true,
                                                  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                                  to_rps_client(file));
-               i915_gem_request_unreference__unlocked(req[i]);
+               i915_gem_request_unreference(req[i]);
        }
        return ret;
 
@@ -3166,17 +3192,16 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
        struct intel_engine_cs *from;
        int ret;
 
-       from = i915_gem_request_get_ring(from_req);
+       from = i915_gem_request_get_engine(from_req);
        if (to == from)
                return 0;
 
        if (i915_gem_request_completed(from_req, true))
                return 0;
 
-       if (!i915_semaphore_is_enabled(obj->base.dev)) {
+       if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
                ret = __i915_wait_request(from_req,
-                                         atomic_read(&i915->gpu_error.reset_counter),
                                          i915->mm.interruptible,
                                          NULL,
                                          &i915->rps.semaphores);
@@ -3260,7 +3285,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_request **to_req)
 {
        const bool readonly = obj->base.pending_write_domain == 0;
-       struct drm_i915_gem_request *req[I915_NUM_RINGS];
+       struct drm_i915_gem_request *req[I915_NUM_ENGINES];
        int ret, i, n;
 
        if (!obj->active)
@@ -3274,7 +3299,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
                if (obj->last_write_req)
                        req[n++] = obj->last_write_req;
        } else {
-               for (i = 0; i < I915_NUM_RINGS; i++)
+               for (i = 0; i < I915_NUM_ENGINES; i++)
                        if (obj->last_read_req[i])
                                req[n++] = obj->last_read_req[i];
        }
@@ -3297,9 +3322,6 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
        if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
                return;
 
-       /* Wait for any direct GTT access to complete */
-       mb();
-
        old_read_domains = obj->base.read_domains;
        old_write_domain = obj->base.write_domain;
 
@@ -3311,6 +3333,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
                                            old_write_domain);
 }
 
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+       GEM_BUG_ON(vma->pin_count);
+
+       if (vma->iomap == NULL)
+               return;
+
+       io_mapping_unmap(vma->iomap);
+       vma->iomap = NULL;
+}
+
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
        struct drm_i915_gem_object *obj = vma->obj;
@@ -3343,6 +3376,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
                ret = i915_gem_object_put_fence(obj);
                if (ret)
                        return ret;
+
+               __i915_vma_iounmap(vma);
        }
 
        trace_i915_vma_unbind(vma);
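
The new __i915_vma_iounmap() helper above drops a cached CPU mapping of the aperture before the VMA is unbound; io_mapping_unmap() is the stock kernel API from <linux/io-mapping.h>. A minimal sketch of the same release-once pattern, using generic names rather than the i915 structures:

        #include <linux/io-mapping.h>

        /* Release-once sketch with generic names: safe to call from every
         * unbind path, whether or not a mapping was ever created.
         */
        struct mapping_cache {
                void __iomem *vaddr;
        };

        static void mapping_cache_release(struct mapping_cache *cache)
        {
                if (cache->vaddr == NULL)
                        return;

                io_mapping_unmap(cache->vaddr);
                cache->vaddr = NULL;
        }
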
@@ -3391,28 +3426,25 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
 int i915_gpu_idle(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       int ret, i;
+       struct intel_engine_cs *engine;
+       int ret;
 
        /* Flush everything onto the inactive list. */
-       for_each_ring(ring, dev_priv, i) {
+       for_each_engine(engine, dev_priv) {
                if (!i915.enable_execlists) {
                        struct drm_i915_gem_request *req;
 
-                       req = i915_gem_request_alloc(ring, NULL);
+                       req = i915_gem_request_alloc(engine, NULL);
                        if (IS_ERR(req))
                                return PTR_ERR(req);
 
                        ret = i915_switch_context(req);
-                       if (ret) {
-                               i915_gem_request_cancel(req);
-                               return ret;
-                       }
-
                        i915_add_request_no_flush(req);
+                       if (ret)
+                               return ret;
                }
 
-               ret = intel_ring_idle(ring);
+               ret = intel_engine_idle(engine);
                if (ret)
                        return ret;
        }
@@ -3466,7 +3498,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           uint64_t flags)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 fence_alignment, unfenced_alignment;
        u32 search_flag, alloc_flag;
        u64 start, end;
@@ -3513,7 +3546,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        end = vm->total;
        if (flags & PIN_MAPPABLE)
-               end = min_t(u64, end, dev_priv->gtt.mappable_end);
+               end = min_t(u64, end, ggtt->mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
@@ -3699,7 +3732,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
                return;
 
        if (i915_gem_clflush_object(obj, obj->pin_display))
-               i915_gem_chipset_flush(obj->base.dev);
+               i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
@@ -3720,6 +3753,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t old_write_domain, old_read_domains;
        struct i915_vma *vma;
        int ret;
@@ -3774,7 +3810,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        vma = i915_gem_obj_to_ggtt(obj);
        if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
                list_move_tail(&vma->vm_link,
-                              &to_i915(obj->base.dev)->gtt.base.inactive_list);
+                              &ggtt->base.inactive_list);
 
        return 0;
 }
@@ -3894,7 +3930,7 @@ out:
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
                if (i915_gem_clflush_object(obj, true))
-                       i915_gem_chipset_flush(obj->base.dev);
+                       i915_gem_chipset_flush(to_i915(obj->base.dev));
        }
 
        return 0;
@@ -3906,7 +3942,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL)
                return -ENOENT;
 
@@ -3949,7 +3985,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                 * cacheline, whereas normally such cachelines would get
                 * invalidated.
                 */
-               if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+               if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
                        return -ENODEV;
 
                level = I915_CACHE_LLC;
@@ -3967,7 +4003,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto rpm_put;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@ -4128,16 +4164,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
        struct drm_i915_gem_request *request, *target = NULL;
-       unsigned reset_counter;
        int ret;
 
        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;
 
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
-       if (ret)
-               return ret;
+       /* ABI: return -EIO if already wedged */
+       if (i915_terminally_wedged(&dev_priv->gpu_error))
+               return -EIO;
 
        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -4153,7 +4188,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
                target = request;
        }
-       reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        if (target)
                i915_gem_request_reference(target);
        spin_unlock(&file_priv->mm.lock);
@@ -4161,11 +4195,11 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (target == NULL)
                return 0;
 
-       ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+       ret = __i915_wait_request(target, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
-       i915_gem_request_unreference__unlocked(target);
+       i915_gem_request_unreference(target);
 
        return ret;
 }
@@ -4211,7 +4245,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
                     (vma->node.start & (fence_alignment - 1)) == 0);
 
        mappable = (vma->node.start + fence_size <=
-                   to_i915(obj->base.dev)->gtt.mappable_end);
+                   to_i915(obj->base.dev)->ggtt.mappable_end);
 
        obj->map_and_fenceable = mappable && fenceable;
 }
@@ -4243,9 +4277,6 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
        vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
                          i915_gem_obj_to_vma(obj, vm);
 
-       if (IS_ERR(vma))
-               return PTR_ERR(vma);
-
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;
@@ -4308,10 +4339,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         uint32_t alignment,
                         uint64_t flags)
 {
-       if (WARN_ONCE(!view, "no view specified"))
-               return -EINVAL;
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-       return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+       BUG_ON(!view);
+
+       return i915_gem_object_do_pin(obj, &ggtt->base, view,
                                      alignment, flags | PIN_GLOBAL);
 }
 
@@ -4321,7 +4355,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 {
        struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
-       BUG_ON(!vma);
        WARN_ON(vma->pin_count == 0);
        WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
@@ -4340,7 +4373,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@ -4359,15 +4392,15 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        if (obj->active) {
                int i;
 
-               for (i = 0; i < I915_NUM_RINGS; i++) {
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
                        struct drm_i915_gem_request *req;
 
                        req = obj->last_read_req[i];
                        if (req)
-                               args->busy |= 1 << (16 + req->ring->exec_id);
+                               args->busy |= 1 << (16 + req->engine->exec_id);
                }
                if (obj->last_write_req)
-                       args->busy |= obj->last_write_req->ring->exec_id;
+                       args->busy |= obj->last_write_req->engine->exec_id;
        }
 
 unref:
@@ -4405,7 +4438,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+       obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@ -4447,8 +4480,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        int i;
 
        INIT_LIST_HEAD(&obj->global_list);
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               INIT_LIST_HEAD(&obj->ring_list[i]);
+       for (i = 0; i < I915_NUM_ENGINES; i++)
+               INIT_LIST_HEAD(&obj->engine_list[i]);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4467,21 +4500,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .put_pages = i915_gem_object_put_pages_gtt,
 };
 
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
                                                  size_t size)
 {
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
        gfp_t mask;
+       int ret;
 
        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
-       if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-               i915_gem_object_free(obj);
-               return NULL;
-       }
+       ret = drm_gem_object_init(dev, &obj->base, size);
+       if (ret)
+               goto fail;
 
        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4518,6 +4551,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        trace_i915_gem_object_create(obj);
 
        return obj;
+
+fail:
+       i915_gem_object_free(obj);
+
+       return ERR_PTR(ret);
 }
 
 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
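
With this hunk, i915_gem_object_create() (the renamed i915_gem_alloc_object()) reports failure through ERR_PTR() instead of returning NULL, which is why a later hunk in this patch can switch i915_gem_object_create_from_data() from IS_ERR_OR_NULL() to a plain IS_ERR() check. A minimal sketch of the calling convention from <linux/err.h>, with a hypothetical caller that assumes the driver's own headers are available:

        #include <linux/err.h>

        /* Hypothetical caller: the error code travels inside the pointer,
         * so there is no NULL-versus-error ambiguity to untangle.
         */
        static int example_use_object(struct drm_device *dev)
        {
                struct drm_i915_gem_object *obj;

                obj = i915_gem_object_create(dev, 4096);
                if (IS_ERR(obj))
                        return PTR_ERR(obj);    /* e.g. -ENOMEM from allocation */

                /* ... use obj ... */
                return 0;
        }
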
@@ -4623,15 +4661,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
                                           const struct i915_ggtt_view *view)
 {
-       struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
        struct i915_vma *vma;
 
-       if (WARN_ONCE(!view, "no view specified"))
-               return ERR_PTR(-EINVAL);
+       GEM_BUG_ON(!view);
 
        list_for_each_entry(vma, &obj->vma_list, obj_link)
-               if (vma->vm == ggtt &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
        return NULL;
 }
@@ -4653,14 +4688,13 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 }
 
 static void
-i915_gem_stop_ringbuffers(struct drm_device *dev)
+i915_gem_stop_engines(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       int i;
+       struct intel_engine_cs *engine;
 
-       for_each_ring(ring, dev_priv, i)
-               dev_priv->gt.stop_ring(ring);
+       for_each_engine(engine, dev_priv)
+               dev_priv->gt.stop_engine(engine);
 }
 
 int
@@ -4674,9 +4708,10 @@ i915_gem_suspend(struct drm_device *dev)
        if (ret)
                goto err;
 
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev_priv);
 
-       i915_gem_stop_ringbuffers(dev);
+       i915_gem_stop_engines(dev);
+       i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4695,37 +4730,6 @@ err:
        return ret;
 }
 
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
-{
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
-       int i, ret;
-
-       if (!HAS_L3_DPF(dev) || !remap_info)
-               return 0;
-
-       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
-       if (ret)
-               return ret;
-
-       /*
-        * Note: We do not worry about the concurrent register cacheline hang
-        * here because no other code should access these registers other than
-        * at initialization time.
-        */
-       for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-               intel_ring_emit(ring, remap_info[i]);
-       }
-
-       intel_ring_advance(ring);
-
-       return ret;
-}
-
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4778,7 +4782,7 @@ static void init_unused_rings(struct drm_device *dev)
        }
 }
 
-int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_engines(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
@@ -4814,13 +4818,13 @@ int i915_gem_init_rings(struct drm_device *dev)
        return 0;
 
 cleanup_vebox_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+       intel_cleanup_engine(&dev_priv->engine[VECS]);
 cleanup_blt_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+       intel_cleanup_engine(&dev_priv->engine[BCS]);
 cleanup_bsd_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+       intel_cleanup_engine(&dev_priv->engine[VCS]);
 cleanup_render_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+       intel_cleanup_engine(&dev_priv->engine[RCS]);
 
        return ret;
 }
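
i915_gem_init_engines() keeps the usual goto-based unwind shown above: each failure jumps to a label that tears down only the engines initialised so far, in reverse order. A generic sketch of that ladder, with illustrative names only:

        /* Generic unwind ladder: undo completed steps in reverse order,
         * letting later failures fall through the earlier cleanup labels.
         */
        static int init_a(void) { return 0; }
        static int init_b(void) { return 0; }
        static int init_c(void) { return 0; }
        static void teardown_a(void) { }
        static void teardown_b(void) { }

        static int init_three_units(void)
        {
                int ret;

                ret = init_a();
                if (ret)
                        return ret;

                ret = init_b();
                if (ret)
                        goto cleanup_a;

                ret = init_c();
                if (ret)
                        goto cleanup_b;

                return 0;

        cleanup_b:
                teardown_b();
        cleanup_a:
                teardown_a();
                return ret;
        }
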
@@ -4829,16 +4833,13 @@ int
 i915_gem_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       int ret, i, j;
-
-       if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
-               return -EIO;
+       struct intel_engine_cs *engine;
+       int ret;
 
        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       if (dev_priv->ellc_size)
+       if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
        if (IS_HASWELL(dev))
@@ -4876,12 +4877,14 @@ i915_gem_init_hw(struct drm_device *dev)
        }
 
        /* Need to do basic initialisation of all rings first: */
-       for_each_ring(ring, dev_priv, i) {
-               ret = ring->init_hw(ring);
+       for_each_engine(engine, dev_priv) {
+               ret = engine->init_hw(engine);
                if (ret)
                        goto out;
        }
 
+       intel_mocs_init_l3cc_table(dev);
+
        /* We can't enable contexts until all firmware is loaded */
        if (HAS_GUC_UCODE(dev)) {
                ret = intel_guc_ucode_load(dev);
@@ -4897,43 +4900,6 @@ i915_gem_init_hw(struct drm_device *dev)
         * on re-initialisation
         */
        ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
-       if (ret)
-               goto out;
-
-       /* Now it is safe to go back round and do everything else: */
-       for_each_ring(ring, dev_priv, i) {
-               struct drm_i915_gem_request *req;
-
-               req = i915_gem_request_alloc(ring, NULL);
-               if (IS_ERR(req)) {
-                       ret = PTR_ERR(req);
-                       i915_gem_cleanup_ringbuffer(dev);
-                       goto out;
-               }
-
-               if (ring->id == RCS) {
-                       for (j = 0; j < NUM_L3_SLICES(dev); j++)
-                               i915_gem_l3_remap(req, j);
-               }
-
-               ret = i915_ppgtt_init_ring(req);
-               if (ret && ret != -EIO) {
-                       DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
-                       i915_gem_request_cancel(req);
-                       i915_gem_cleanup_ringbuffer(dev);
-                       goto out;
-               }
-
-               ret = i915_gem_context_enable(req);
-               if (ret && ret != -EIO) {
-                       DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
-                       i915_gem_request_cancel(req);
-                       i915_gem_cleanup_ringbuffer(dev);
-                       goto out;
-               }
-
-               i915_add_request_no_flush(req);
-       }
 
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -4945,21 +4911,18 @@ int i915_gem_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       i915.enable_execlists = intel_sanitize_enable_execlists(dev,
-                       i915.enable_execlists);
-
        mutex_lock(&dev->struct_mutex);
 
        if (!i915.enable_execlists) {
                dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-               dev_priv->gt.init_rings = i915_gem_init_rings;
-               dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
-               dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+               dev_priv->gt.init_engines = i915_gem_init_engines;
+               dev_priv->gt.cleanup_engine = intel_cleanup_engine;
+               dev_priv->gt.stop_engine = intel_stop_engine;
        } else {
                dev_priv->gt.execbuf_submit = intel_execlists_submission;
-               dev_priv->gt.init_rings = intel_logical_rings_init;
-               dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
-               dev_priv->gt.stop_ring = intel_logical_ring_stop;
+               dev_priv->gt.init_engines = intel_logical_rings_init;
+               dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+               dev_priv->gt.stop_engine = intel_logical_ring_stop;
        }
 
        /* This is just a security blanket to placate dragons.
@@ -4970,17 +4933,14 @@ int i915_gem_init(struct drm_device *dev)
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       ret = i915_gem_init_userptr(dev);
-       if (ret)
-               goto out_unlock;
-
-       i915_gem_init_global_gtt(dev);
+       i915_gem_init_userptr(dev_priv);
+       i915_gem_init_ggtt(dev);
 
        ret = i915_gem_context_init(dev);
        if (ret)
                goto out_unlock;
 
-       ret = dev_priv->gt.init_rings(dev);
+       ret = dev_priv->gt.init_engines(dev);
        if (ret)
                goto out_unlock;
 
@@ -5003,29 +4963,44 @@ out_unlock:
 }
 
 void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       int i;
-
-       for_each_ring(ring, dev_priv, i)
-               dev_priv->gt.cleanup_ring(ring);
+       struct intel_engine_cs *engine;
 
-    if (i915.enable_execlists)
-            /*
-             * Neither the BIOS, ourselves or any other kernel
-             * expects the system to be in execlists mode on startup,
-             * so we need to reset the GPU back to legacy mode.
-             */
-            intel_gpu_reset(dev);
+       for_each_engine(engine, dev_priv)
+               dev_priv->gt.cleanup_engine(engine);
 }
 
 static void
-init_ring_lists(struct intel_engine_cs *ring)
+init_engine_lists(struct intel_engine_cs *engine)
 {
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
+       INIT_LIST_HEAD(&engine->active_list);
+       INIT_LIST_HEAD(&engine->request_list);
+}
+
+void
+i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+
+       if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
+           !IS_CHERRYVIEW(dev_priv))
+               dev_priv->num_fence_regs = 32;
+       else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
+                IS_I945GM(dev_priv) || IS_G33(dev_priv))
+               dev_priv->num_fence_regs = 16;
+       else
+               dev_priv->num_fence_regs = 8;
+
+       if (intel_vgpu_active(dev_priv))
+               dev_priv->num_fence_regs =
+                               I915_READ(vgtif_reg(avail_rs.fence_num));
+
+       /* Initialize fence registers to zero */
+       i915_gem_restore_fences(dev);
+
+       i915_gem_detect_bit_6_swizzle(dev);
 }
 
 void
@@ -5055,8 +5030,8 @@ i915_gem_load_init(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               init_ring_lists(&dev_priv->ring[i]);
+       for (i = 0; i < I915_NUM_ENGINES; i++)
+               init_engine_lists(&dev_priv->engine[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -5067,17 +5042,6 @@ i915_gem_load_init(struct drm_device *dev)
 
        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-       if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
-               dev_priv->num_fence_regs = 32;
-       else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-               dev_priv->num_fence_regs = 16;
-       else
-               dev_priv->num_fence_regs = 8;
-
-       if (intel_vgpu_active(dev))
-               dev_priv->num_fence_regs =
-                               I915_READ(vgtif_reg(avail_rs.fence_num));
-
        /*
         * Set initial sequence number for requests.
         * Using this number allows the wraparound to happen early,
@@ -5086,11 +5050,8 @@ i915_gem_load_init(struct drm_device *dev)
        dev_priv->next_seqno = ((u32)~0 - 0x1100);
        dev_priv->last_seqno = ((u32)~0 - 0x1101);
 
-       /* Initialize fence registers to zero */
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-       i915_gem_restore_fences(dev);
 
-       i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
 
        dev_priv->mm.interruptible = true;
@@ -5107,6 +5068,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
        kmem_cache_destroy(dev_priv->objects);
 }
 
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       /* Called just before we write the hibernation image.
+        *
+        * We need to update the domain tracking to reflect that the CPU
+        * will be accessing all the pages to create and restore from the
+        * hibernation, and so upon restoration those pages will be in the
+        * CPU domain.
+        *
+        * To make sure the hibernation image contains the latest state,
+        * we update that state just before writing out the image.
+        */
+
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       return 0;
+}
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
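
The new i915_gem_freeze_late() above runs just before the hibernation image is written, marking every object as CPU-domain so the image matches what the restored kernel will see. Late-freeze hooks of this kind are typically registered through a freeze_late callback in struct dev_pm_ops; the sketch below is a hypothetical generic driver, not the i915 plumbing (which this hunk does not show):

        #include <linux/device.h>
        #include <linux/pm.h>

        struct example_priv;                            /* hypothetical driver state */
        int example_mark_objects_cpu_domain(struct example_priv *priv);

        /* Hypothetical registration only; the i915 wiring is not part of
         * this hunk.
         */
        static int example_freeze_late(struct device *dev)
        {
                struct example_priv *priv = dev_get_drvdata(dev);

                return example_mark_objects_cpu_domain(priv);
        }

        static const struct dev_pm_ops example_pm_ops = {
                .freeze_late = example_freeze_late,
        };
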
@@ -5213,12 +5202,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == ggtt &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
 
        WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5244,11 +5231,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == ggtt &&
+               if (vma->is_ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
                        return true;
@@ -5267,23 +5253,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
        return false;
 }
 
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-                               struct i915_address_space *vm)
+unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-       BUG_ON(list_empty(&o->vma_list));
+       GEM_BUG_ON(list_empty(&o->vma_list));
 
        list_for_each_entry(vma, &o->vma_list, obj_link) {
                if (vma->is_ggtt &&
-                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-                       continue;
-               if (vma->vm == vm)
+                   vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
                        return vma->node.size;
        }
+
        return 0;
 }
 
@@ -5322,8 +5303,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
        size_t bytes;
        int ret;
 
-       obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
-       if (IS_ERR_OR_NULL(obj))
+       obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+       if (IS_ERR(obj))
                return obj;
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);