Merge tag 'drm-intel-next-2014-12-19' of git://anongit.freedesktop.org/drm-intel...
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 52adcb6..3044fb3 100644
@@ -1151,19 +1151,18 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
 }
 
 /*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
+ * Compare an arbitrary request against the outstanding lazy request. Emit it on a match.
  */
 int
-i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
+i915_gem_check_olr(struct drm_i915_gem_request *req)
 {
        int ret;
 
-       BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
 
        ret = 0;
-       if (seqno == ring->outstanding_lazy_seqno)
-               ret = i915_add_request(ring, NULL);
+       if (req == req->ring->outstanding_lazy_request)
+               ret = i915_add_request(req->ring);
 
        return ret;
 }
@@ -1188,10 +1187,9 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 }
 
 /**
- * __i915_wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @reset_counter: reset sequence associated with the given seqno
+ * __i915_wait_request - wait until execution of request has finished
+ * @req: the request to wait upon
+ * @reset_counter: reset sequence associated with the given request
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
@@ -1202,15 +1200,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
  * inserted.
  *
- * Returns 0 if the seqno was found within the alloted time. Else returns the
+ * Returns 0 if the request was found within the allotted time. Else returns the
  * errno with remaining time filled in timeout argument.
  */
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_request(struct drm_i915_gem_request *req,
                        unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
                        struct drm_i915_file_private *file_priv)
 {
+       struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
@@ -1222,7 +1221,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 
        WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
-       if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+       if (i915_gem_request_completed(req, true))
                return 0;
 
        timeout_expire = timeout ?
@@ -1240,7 +1239,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                return -ENODEV;
 
        /* Record current time in case interrupted by signal, or wedged */
-       trace_i915_gem_request_wait_begin(ring, seqno);
+       trace_i915_gem_request_wait_begin(req);
        before = ktime_get_raw_ns();
        for (;;) {
                struct timer_list timer;
@@ -1259,7 +1258,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                        break;
                }
 
-               if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+               if (i915_gem_request_completed(req, false)) {
                        ret = 0;
                        break;
                }
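
The i915_gem_request_completed() checks above replace the raw i915_seqno_passed() comparisons. A minimal sketch of how such a helper can sit on top of the existing seqno machinery (illustrative only; the real inline lives elsewhere, and lazy_coherency mirrors the old get_seqno() argument):

static inline bool
i915_gem_request_completed(struct drm_i915_gem_request *req,
                           bool lazy_coherency)
{
        u32 seqno;

        /* Read the engine's current seqno; lazy_coherency allows the
         * cheaper, possibly stale read used in polling loops. */
        seqno = req->ring->get_seqno(req->ring, lazy_coherency);

        return i915_seqno_passed(seqno, i915_gem_request_get_seqno(req));
}
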
@@ -1291,7 +1290,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                }
        }
        now = ktime_get_raw_ns();
-       trace_i915_gem_request_wait_end(ring, seqno);
+       trace_i915_gem_request_wait_end(req);
 
        if (!irq_test_in_progress)
                ring->irq_put(ring);
@@ -1318,32 +1317,40 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 }
 
 /**
- * Waits for a sequence number to be signaled, and cleans up the
+ * Waits for a request to be signaled, and cleans up the
  * request and object lists appropriately for that event.
  */
 int
-i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
+i915_wait_request(struct drm_i915_gem_request *req)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       bool interruptible = dev_priv->mm.interruptible;
+       struct drm_device *dev;
+       struct drm_i915_private *dev_priv;
+       bool interruptible;
        unsigned reset_counter;
        int ret;
 
+       BUG_ON(req == NULL);
+
+       dev = req->ring->dev;
+       dev_priv = dev->dev_private;
+       interruptible = dev_priv->mm.interruptible;
+
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-       BUG_ON(seqno == 0);
 
        ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
        if (ret)
                return ret;
 
-       ret = i915_gem_check_olr(ring, seqno);
+       ret = i915_gem_check_olr(req);
        if (ret)
                return ret;
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-       return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
-                                NULL, NULL);
+       i915_gem_request_reference(req);
+       ret = __i915_wait_request(req, reset_counter,
+                                 interruptible, NULL, NULL);
+       i915_gem_request_unreference(req);
+       return ret;
 }
 
 static int
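
i915_wait_request() now brackets the actual wait with i915_gem_request_reference()/i915_gem_request_unreference(), so the request cannot be retired and freed while the waiter runs; this matters once callers start dropping struct_mutex around __i915_wait_request(), as the wait ioctl further down does. A sketch of the assumed kref-based helpers, with i915_gem_request_free() (introduced later in this patch) acting as the release callback:

static inline void
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
        kref_get(&req->ref);
}

static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
        WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
        kref_put(&req->ref, i915_gem_request_free);
}
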
@@ -1355,11 +1362,11 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
        /* Manually manage the write flush as we may have not yet
         * retired the buffer.
         *
-        * Note that the last_write_seqno is always the earlier of
-        * the two (read/write) seqno, so if we haved successfully waited,
+        * Note that the last_write_req is always the earlier of
+        * the two (read/write) requests, so if we have successfully waited,
         * we know we have passed the last write.
         */
-       obj->last_write_seqno = 0;
+       i915_gem_request_assign(&obj->last_write_req, NULL);
 
        return 0;
 }
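
i915_gem_request_assign() is the helper this patch uses wherever an object or ring swaps the request it tracks (last_read_req, last_write_req, last_fenced_req, outstanding_lazy_request, trace_irq_req). A plausible implementation that keeps the reference counts balanced:

static inline void
i915_gem_request_assign(struct drm_i915_gem_request **pdst,
                        struct drm_i915_gem_request *src)
{
        /* Reference the new request before dropping the old one so that
         * assigning a request to itself is harmless. */
        if (src)
                i915_gem_request_reference(src);

        if (*pdst)
                i915_gem_request_unreference(*pdst);

        *pdst = src;
}
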
@@ -1372,15 +1379,14 @@ static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
 {
-       struct intel_engine_cs *ring = obj->ring;
-       u32 seqno;
+       struct drm_i915_gem_request *req;
        int ret;
 
-       seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
-       if (seqno == 0)
+       req = readonly ? obj->last_write_req : obj->last_read_req;
+       if (!req)
                return 0;
 
-       ret = i915_wait_seqno(ring, seqno);
+       ret = i915_wait_request(req);
        if (ret)
                return ret;
 
@@ -1395,33 +1401,33 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                            struct drm_i915_file_private *file_priv,
                                            bool readonly)
 {
+       struct drm_i915_gem_request *req;
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = obj->ring;
        unsigned reset_counter;
-       u32 seqno;
        int ret;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);
 
-       seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
-       if (seqno == 0)
+       req = readonly ? obj->last_write_req : obj->last_read_req;
+       if (!req)
                return 0;
 
        ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
        if (ret)
                return ret;
 
-       ret = i915_gem_check_olr(ring, seqno);
+       ret = i915_gem_check_olr(req);
        if (ret)
                return ret;
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
-       ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
-                               file_priv);
+       ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
        mutex_lock(&dev->struct_mutex);
+       i915_gem_request_unreference(req);
        if (ret)
                return ret;
 
@@ -2250,14 +2256,18 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_engine_cs *ring)
 {
-       u32 seqno = intel_ring_get_seqno(ring);
+       struct drm_i915_gem_request *req;
+       struct intel_engine_cs *old_ring;
 
        BUG_ON(ring == NULL);
-       if (obj->ring != ring && obj->last_write_seqno) {
-               /* Keep the seqno relative to the current ring */
-               obj->last_write_seqno = seqno;
+
+       req = intel_ring_get_request(ring);
+       old_ring = i915_gem_request_get_ring(obj->last_read_req);
+
+       if (old_ring != ring && obj->last_write_req) {
+               /* Keep the request relative to the current ring */
+               i915_gem_request_assign(&obj->last_write_req, req);
        }
-       obj->ring = ring;
 
        /* Add a reference if we're newly entering the active list. */
        if (!obj->active) {
@@ -2267,7 +2277,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 
        list_move_tail(&obj->ring_list, &ring->active_list);
 
-       obj->last_read_seqno = seqno;
+       i915_gem_request_assign(&obj->last_read_req, req);
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2280,29 +2290,25 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct i915_address_space *vm;
        struct i915_vma *vma;
 
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
-       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-               vma = i915_gem_obj_to_vma(obj, vm);
-               if (vma && !list_empty(&vma->mm_list))
-                       list_move_tail(&vma->mm_list, &vm->inactive_list);
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               if (!list_empty(&vma->mm_list))
+                       list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
        }
 
        intel_fb_obj_flush(obj, true);
 
        list_del_init(&obj->ring_list);
-       obj->ring = NULL;
 
-       obj->last_read_seqno = 0;
-       obj->last_write_seqno = 0;
+       i915_gem_request_assign(&obj->last_read_req, NULL);
+       i915_gem_request_assign(&obj->last_write_req, NULL);
        obj->base.write_domain = 0;
 
-       obj->last_fenced_seqno = 0;
+       i915_gem_request_assign(&obj->last_fenced_req, NULL);
 
        obj->active = 0;
        drm_gem_object_unreference(&obj->base);
@@ -2313,13 +2319,10 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_retire(struct drm_i915_gem_object *obj)
 {
-       struct intel_engine_cs *ring = obj->ring;
-
-       if (ring == NULL)
+       if (obj->last_read_req == NULL)
                return;
 
-       if (i915_seqno_passed(ring->get_seqno(ring, true),
-                             obj->last_read_seqno))
+       if (i915_gem_request_completed(obj->last_read_req, true))
                i915_gem_object_move_to_inactive(obj);
 }
 
@@ -2395,8 +2398,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 
 int __i915_add_request(struct intel_engine_cs *ring,
                       struct drm_file *file,
-                      struct drm_i915_gem_object *obj,
-                      u32 *out_seqno)
+                      struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
@@ -2404,7 +2406,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
        u32 request_ring_position, request_start;
        int ret;
 
-       request = ring->preallocated_lazy_request;
+       request = ring->outstanding_lazy_request;
        if (WARN_ON(request == NULL))
                return -ENOMEM;
 
@@ -2449,8 +2451,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
                        return ret;
        }
 
-       request->seqno = intel_ring_get_seqno(ring);
-       request->ring = ring;
        request->head = request_start;
        request->tail = request_ring_position;
 
@@ -2485,9 +2485,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
                spin_unlock(&file_priv->mm.lock);
        }
 
-       trace_i915_gem_request_add(ring, request->seqno);
-       ring->outstanding_lazy_seqno = 0;
-       ring->preallocated_lazy_request = NULL;
+       trace_i915_gem_request_add(request);
+       ring->outstanding_lazy_request = NULL;
 
        i915_queue_hangcheck(ring->dev);
 
@@ -2497,8 +2496,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
                           round_jiffies_up_relative(HZ));
        intel_mark_busy(dev_priv->dev);
 
-       if (out_seqno)
-               *out_seqno = request->seqno;
        return 0;
 }
 
@@ -2562,33 +2559,39 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
 
 static void i915_gem_free_request(struct drm_i915_gem_request *request)
 {
-       struct intel_context *ctx = request->ctx;
-
        list_del(&request->list);
        i915_gem_request_remove_from_client(request);
 
+       i915_gem_request_unreference(request);
+}
+
+void i915_gem_request_free(struct kref *req_ref)
+{
+       struct drm_i915_gem_request *req = container_of(req_ref,
+                                                typeof(*req), ref);
+       struct intel_context *ctx = req->ctx;
+
        if (ctx) {
                if (i915.enable_execlists) {
-                       struct intel_engine_cs *ring = request->ring;
+                       struct intel_engine_cs *ring = req->ring;
 
                        if (ctx != ring->default_context)
                                intel_lr_context_unpin(ring, ctx);
                }
+
                i915_gem_context_unreference(ctx);
        }
-       kfree(request);
+
+       kfree(req);
 }
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_request *request;
-       u32 completed_seqno;
-
-       completed_seqno = ring->get_seqno(ring, false);
 
        list_for_each_entry(request, &ring->request_list, list) {
-               if (i915_seqno_passed(completed_seqno, request->seqno))
+               if (i915_gem_request_completed(request, false))
                        continue;
 
                return request;
@@ -2663,10 +2666,8 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                i915_gem_free_request(request);
        }
 
-       /* These may not have been flush before the reset, do so now */
-       kfree(ring->preallocated_lazy_request);
-       ring->preallocated_lazy_request = NULL;
-       ring->outstanding_lazy_seqno = 0;
+       /* This may not have been flushed before the reset, so clean it now */
+       i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2718,15 +2719,11 @@ void i915_gem_reset(struct drm_device *dev)
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
-       uint32_t seqno;
-
        if (list_empty(&ring->request_list))
                return;
 
        WARN_ON(i915_verify_lists(ring->dev));
 
-       seqno = ring->get_seqno(ring, true);
-
        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate,
         * before we free the context associated with the requests.
@@ -2738,7 +2735,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                                      struct drm_i915_gem_object,
                                      ring_list);
 
-               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+               if (!i915_gem_request_completed(obj->last_read_req, true))
                        break;
 
                i915_gem_object_move_to_inactive(obj);
@@ -2753,10 +2750,10 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                                           struct drm_i915_gem_request,
                                           list);
 
-               if (!i915_seqno_passed(seqno, request->seqno))
+               if (!i915_gem_request_completed(request, true))
                        break;
 
-               trace_i915_gem_request_retire(ring, request->seqno);
+               trace_i915_gem_request_retire(request);
 
                /* This is one of the few common intersection points
                 * between legacy ringbuffer submission and execlists:
@@ -2779,10 +2776,10 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                i915_gem_free_request(request);
        }
 
-       if (unlikely(ring->trace_irq_seqno &&
-                    i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+       if (unlikely(ring->trace_irq_req &&
+                    i915_gem_request_completed(ring->trace_irq_req, true))) {
                ring->irq_put(ring);
-               ring->trace_irq_seqno = 0;
+               i915_gem_request_assign(&ring->trace_irq_req, NULL);
        }
 
        WARN_ON(i915_verify_lists(ring->dev));
@@ -2854,14 +2851,17 @@ i915_gem_idle_work_handler(struct work_struct *work)
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
+       struct intel_engine_cs *ring;
        int ret;
 
        if (obj->active) {
-               ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+               ring = i915_gem_request_get_ring(obj->last_read_req);
+
+               ret = i915_gem_check_olr(obj->last_read_req);
                if (ret)
                        return ret;
 
-               i915_gem_retire_requests_ring(obj->ring);
+               i915_gem_retire_requests_ring(ring);
        }
 
        return 0;
@@ -2895,9 +2895,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
-       struct intel_engine_cs *ring = NULL;
+       struct drm_i915_gem_request *req;
        unsigned reset_counter;
-       u32 seqno = 0;
        int ret = 0;
 
        if (args->flags != 0)
@@ -2918,13 +2917,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (ret)
                goto out;
 
-       if (obj->active) {
-               seqno = obj->last_read_seqno;
-               ring = obj->ring;
-       }
+       if (!obj->active || !obj->last_read_req)
+               goto out;
 
-       if (seqno == 0)
-                goto out;
+       req = obj->last_read_req;
 
        /* Do this after OLR check to make sure we make forward progress polling
         * on this IOCTL with a timeout <=0 (like busy ioctl)
@@ -2936,10 +2932,15 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
        drm_gem_object_unreference(&obj->base);
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
 
-       return __i915_wait_seqno(ring, seqno, reset_counter, true,
-                                &args->timeout_ns, file->driver_priv);
+       ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+                                 file->driver_priv);
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_request_unreference(req);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
 
 out:
        drm_gem_object_unreference(&obj->base);
@@ -2963,10 +2964,12 @@ int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
                     struct intel_engine_cs *to)
 {
-       struct intel_engine_cs *from = obj->ring;
+       struct intel_engine_cs *from;
        u32 seqno;
        int ret, idx;
 
+       from = i915_gem_request_get_ring(obj->last_read_req);
+
        if (from == NULL || to == from)
                return 0;
 
@@ -2975,24 +2978,25 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
        idx = intel_ring_sync_index(from, to);
 
-       seqno = obj->last_read_seqno;
+       seqno = i915_gem_request_get_seqno(obj->last_read_req);
        /* Optimization: Avoid semaphore sync when we are sure we already
         * waited for an object with higher seqno */
        if (seqno <= from->semaphore.sync_seqno[idx])
                return 0;
 
-       ret = i915_gem_check_olr(obj->ring, seqno);
+       ret = i915_gem_check_olr(obj->last_read_req);
        if (ret)
                return ret;
 
-       trace_i915_gem_ring_sync_to(from, to, seqno);
+       trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
        ret = to->semaphore.sync_to(to, from, seqno);
        if (!ret)
-               /* We use last_read_seqno because sync_to()
+               /* We use last_read_req because sync_to()
                 * might have just caused seqno wrap under
                 * the radar.
                 */
-               from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
+               from->semaphore.sync_seqno[idx] =
+                               i915_gem_request_get_seqno(obj->last_read_req);
 
        return ret;
 }
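
i915_gem_object_sync() now derives both the source engine and the seqno from obj->last_read_req through accessors. These are assumed to be NULL-tolerant, since last_read_req may legitimately be NULL (the from == NULL check above relies on that). A sketch:

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
        return req ? req->ring : NULL;
}

static inline u32
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
        return req ? req->seqno : 0;
}
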
@@ -3048,10 +3052,8 @@ int i915_vma_unbind(struct i915_vma *vma)
         * cause memory corruption through use-after-free.
         */
 
-       /* Throw away the active reference before moving to the unbound list */
-       i915_gem_object_retire(obj);
-
-       if (i915_is_ggtt(vma->vm)) {
+       if (i915_is_ggtt(vma->vm) &&
+           vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
                i915_gem_object_finish_gtt(obj);
 
                /* release the fence reg _after_ flushing */
@@ -3065,8 +3067,15 @@ int i915_vma_unbind(struct i915_vma *vma)
        vma->unbind_vma(vma);
 
        list_del_init(&vma->mm_list);
-       if (i915_is_ggtt(vma->vm))
-               obj->map_and_fenceable = false;
+       if (i915_is_ggtt(vma->vm)) {
+               if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+                       obj->map_and_fenceable = false;
+               } else if (vma->ggtt_view.pages) {
+                       sg_free_table(vma->ggtt_view.pages);
+                       kfree(vma->ggtt_view.pages);
+                       vma->ggtt_view.pages = NULL;
+               }
+       }
 
        drm_mm_remove_node(&vma->node);
        i915_gem_vma_destroy(vma);
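
i915_vma_unbind() now distinguishes GGTT views: only the normal view clears map_and_fenceable, while alternate views free the scatterlist built for their private page layout. The view descriptor assumed by this code is roughly the following (illustrative; the real definition belongs to the GTT code, and only the normal view type exists at this point):

enum i915_ggtt_view_type {
        I915_GGTT_VIEW_NORMAL = 0,
};

struct i915_ggtt_view {
        enum i915_ggtt_view_type type;

        /* Backing pages when the view's layout differs from the object's
         * natural one; NULL for the normal view. */
        struct sg_table *pages;
};
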
@@ -3074,6 +3083,10 @@ int i915_vma_unbind(struct i915_vma *vma)
        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist. */
        if (list_empty(&obj->vma_list)) {
+               /* Throw away the active reference before
+                * moving to the unbound list. */
+               i915_gem_object_retire(obj);
+
                i915_gem_gtt_finish_object(obj);
                list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
        }
@@ -3257,17 +3270,12 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
             "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
             obj->stride, obj->tiling_mode);
 
-       switch (INTEL_INFO(dev)->gen) {
-       case 9:
-       case 8:
-       case 7:
-       case 6:
-       case 5:
-       case 4: i965_write_fence_reg(dev, reg, obj); break;
-       case 3: i915_write_fence_reg(dev, reg, obj); break;
-       case 2: i830_write_fence_reg(dev, reg, obj); break;
-       default: BUG();
-       }
+       if (IS_GEN2(dev))
+               i830_write_fence_reg(dev, reg, obj);
+       else if (IS_GEN3(dev))
+               i915_write_fence_reg(dev, reg, obj);
+       else if (INTEL_INFO(dev)->gen >= 4)
+               i965_write_fence_reg(dev, reg, obj);
 
        /* And similarly be paranoid that no direct access to this region
         * is reordered to before the fence is installed.
@@ -3306,12 +3314,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 static int
 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
-       if (obj->last_fenced_seqno) {
-               int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+       if (obj->last_fenced_req) {
+               int ret = i915_wait_request(obj->last_fenced_req);
                if (ret)
                        return ret;
 
-               obj->last_fenced_seqno = 0;
+               i915_gem_request_assign(&obj->last_fenced_req, NULL);
        }
 
        return 0;
@@ -3484,7 +3492,8 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           struct i915_address_space *vm,
                           unsigned alignment,
-                          uint64_t flags)
+                          uint64_t flags,
+                          const struct i915_ggtt_view *view)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3534,7 +3543,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
-       vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+       vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
        if (IS_ERR(vma))
                goto err_unpin;
 
@@ -3564,15 +3573,19 @@ search_free:
        if (ret)
                goto err_remove_node;
 
+       trace_i915_vma_bind(vma, flags);
+       ret = i915_vma_bind(vma, obj->cache_level,
+                           flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+       if (ret)
+               goto err_finish_gtt;
+
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &vm->inactive_list);
 
-       trace_i915_vma_bind(vma, flags);
-       vma->bind_vma(vma, obj->cache_level,
-                     flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
-
        return vma;
 
+err_finish_gtt:
+       i915_gem_gtt_finish_object(obj);
 err_remove_node:
        drm_mm_remove_node(&vma->node);
 err_free_vma:
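
The direct vma->bind_vma() call is replaced by i915_vma_bind(), which can fail, hence the new err_finish_gtt unwind path above. A hedged sketch of such a wrapper (the page-population helper name here is hypothetical):

int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
{
        int ret;

        if (i915_is_ggtt(vma->vm)) {
                /* Hypothetical helper: materialise vma->ggtt_view.pages for
                 * non-normal views before handing off to the low-level bind. */
                ret = i915_get_ggtt_vma_pages(vma);
                if (ret)
                        return ret;
        }

        vma->bind_vma(vma, cache_level, flags);

        return 0;
}
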
@@ -3775,9 +3788,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                }
 
                list_for_each_entry(vma, &obj->vma_list, vma_link)
-                       if (drm_mm_node_allocated(&vma->node))
-                               vma->bind_vma(vma, cache_level,
-                                               vma->bound & GLOBAL_BIND);
+                       if (drm_mm_node_allocated(&vma->node)) {
+                               ret = i915_vma_bind(vma, cache_level,
+                                                   vma->bound & GLOBAL_BIND);
+                               if (ret)
+                                       return ret;
+                       }
        }
 
        list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3896,18 +3912,14 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
        if (!vma)
                return false;
 
-       /* There are 3 sources that pin objects:
+       /* There are 2 sources that pin objects:
         *   1. The display engine (scanouts, sprites, cursors);
         *   2. Reservations for execbuffer;
-        *   3. The user.
         *
         * We can ignore reservations as we hold the struct_mutex and
-        * are only called outside of the reservation path.  The user
-        * can only increment pin_count once, and so if after
-        * subtracting the potential reference by the user, any pin_count
-        * remains, it must be due to another use by the display engine.
+        * are only called outside of the reservation path.
         */
-       return vma->pin_count - !!obj->user_pin_count;
+       return vma->pin_count;
 }
 
 /*
@@ -3924,7 +3936,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
        bool was_pin_display;
        int ret;
 
-       if (pipelined != obj->ring) {
+       if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
                ret = i915_gem_object_sync(obj, pipelined);
                if (ret)
                        return ret;
@@ -4076,10 +4088,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
-       struct drm_i915_gem_request *request;
-       struct intel_engine_cs *ring = NULL;
+       struct drm_i915_gem_request *request, *target = NULL;
        unsigned reset_counter;
-       u32 seqno = 0;
        int ret;
 
        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -4095,19 +4105,24 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
 
-               ring = request->ring;
-               seqno = request->seqno;
+               target = request;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       if (target)
+               i915_gem_request_reference(target);
        spin_unlock(&file_priv->mm.lock);
 
-       if (seqno == 0)
+       if (target == NULL)
                return 0;
 
-       ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+       ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_request_unreference(target);
+       mutex_unlock(&dev->struct_mutex);
+
        return ret;
 }
 
@@ -4131,10 +4146,11 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
 }
 
 int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-                   struct i915_address_space *vm,
-                   uint32_t alignment,
-                   uint64_t flags)
+i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+                        struct i915_address_space *vm,
+                        uint32_t alignment,
+                        uint64_t flags,
+                        const struct i915_ggtt_view *view)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
@@ -4150,7 +4166,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
        if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
                return -EINVAL;
 
-       vma = i915_gem_obj_to_vma(obj, vm);
+       vma = i915_gem_obj_to_vma_view(obj, vm, view);
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;
@@ -4160,7 +4176,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                             "bo is already pinned with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
-                            i915_gem_obj_offset(obj, vm), alignment,
+                            i915_gem_obj_offset_view(obj, vm, view->type),
+                            alignment,
                             !!(flags & PIN_MAPPABLE),
                             obj->map_and_fenceable);
                        ret = i915_vma_unbind(vma);
@@ -4173,13 +4190,17 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
        bound = vma ? vma->bound : 0;
        if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-               vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+               vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
+                                                flags, view);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
        }
 
-       if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
-               vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+       if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
+               ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+               if (ret)
+                       return ret;
+       }
 
        if ((bound ^ vma->bound) & GLOBAL_BIND) {
                bool mappable, fenceable;
@@ -4250,102 +4271,6 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
        }
 }
 
-int
-i915_gem_pin_ioctl(struct drm_device *dev, void *data,
-                  struct drm_file *file)
-{
-       struct drm_i915_gem_pin *args = data;
-       struct drm_i915_gem_object *obj;
-       int ret;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
-
-       if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_DEBUG("Attempting to pin a purgeable buffer\n");
-               ret = -EFAULT;
-               goto out;
-       }
-
-       if (obj->pin_filp != NULL && obj->pin_filp != file) {
-               DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
-                         args->handle);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (obj->user_pin_count == ULONG_MAX) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       if (obj->user_pin_count == 0) {
-               ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
-               if (ret)
-                       goto out;
-       }
-
-       obj->user_pin_count++;
-       obj->pin_filp = file;
-
-       args->offset = i915_gem_obj_ggtt_offset(obj);
-out:
-       drm_gem_object_unreference(&obj->base);
-unlock:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-
-int
-i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-                    struct drm_file *file)
-{
-       struct drm_i915_gem_pin *args = data;
-       struct drm_i915_gem_object *obj;
-       int ret;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
-       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
-       if (&obj->base == NULL) {
-               ret = -ENOENT;
-               goto unlock;
-       }
-
-       if (obj->pin_filp != file) {
-               DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
-                         args->handle);
-               ret = -EINVAL;
-               goto out;
-       }
-       obj->user_pin_count--;
-       if (obj->user_pin_count == 0) {
-               obj->pin_filp = NULL;
-               i915_gem_object_ggtt_unpin(obj);
-       }
-
-out:
-       drm_gem_object_unreference(&obj->base);
-unlock:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
@@ -4372,9 +4297,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        ret = i915_gem_object_flush_active(obj);
 
        args->busy = obj->active;
-       if (obj->ring) {
+       if (obj->last_read_req) {
+               struct intel_engine_cs *ring;
                BUILD_BUG_ON(I915_NUM_RINGS > 16);
-               args->busy |= intel_ring_flag(obj->ring) << 16;
+               ring = i915_gem_request_get_ring(obj->last_read_req);
+               args->busy |= intel_ring_flag(ring) << 16;
        }
 
        drm_gem_object_unreference(&obj->base);
@@ -4454,6 +4381,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
+       INIT_LIST_HEAD(&obj->batch_pool_list);
 
        obj->ops = ops;
 
@@ -4609,12 +4537,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-                                    struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+                                         struct i915_address_space *vm,
+                                         const struct i915_ggtt_view *view)
 {
        struct i915_vma *vma;
        list_for_each_entry(vma, &obj->vma_list, vma_link)
-               if (vma->vm == vm)
+               if (vma->vm == vm && vma->ggtt_view.type == view->type)
                        return vma;
 
        return NULL;
@@ -4674,6 +4603,11 @@ i915_gem_suspend(struct drm_device *dev)
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
        flush_delayed_work(&dev_priv->mm.idle_work);
 
+       /* Assert that we successfully flushed all the work and
+        * reset the GPU back to its idle, low power state.
+        */
+       WARN_ON(dev_priv->mm.busy);
+
        return 0;
 
 err:
@@ -4785,14 +4719,6 @@ int i915_gem_init_rings(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       /*
-        * At least 830 can leave some of the unused rings
-        * "active" (ie. head != tail) after resume which
-        * will prevent c3 entry. Makes sure all unused rings
-        * are totally idle.
-        */
-       init_unused_rings(dev);
-
        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;
@@ -4845,6 +4771,7 @@ int
 i915_gem_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
        int ret, i;
 
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
@@ -4871,9 +4798,19 @@ i915_gem_init_hw(struct drm_device *dev)
 
        i915_gem_init_swizzling(dev);
 
-       ret = dev_priv->gt.init_rings(dev);
-       if (ret)
-               return ret;
+       /*
+        * At least 830 can leave some of the unused rings
+        * "active" (ie. head != tail) after resume which
+        * will prevent C3 entry. Make sure all unused rings
+        * are totally idle.
+        */
+       init_unused_rings(dev);
+
+       for_each_ring(ring, dev_priv, i) {
+               ret = ring->init_hw(ring);
+               if (ret)
+                       return ret;
+       }
 
        for (i = 0; i < NUM_L3_SLICES(dev); i++)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);
@@ -4933,18 +4870,18 @@ int i915_gem_init(struct drm_device *dev)
        }
 
        ret = i915_gem_init_userptr(dev);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       if (ret)
+               goto out_unlock;
 
        i915_gem_init_global_gtt(dev);
 
        ret = i915_gem_context_init(dev);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       if (ret)
+               goto out_unlock;
+
+       ret = dev_priv->gt.init_rings(dev);
+       if (ret)
+               goto out_unlock;
 
        ret = i915_gem_init_hw(dev);
        if (ret == -EIO) {
@@ -4956,6 +4893,8 @@ int i915_gem_init(struct drm_device *dev)
                atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
                ret = 0;
        }
+
+out_unlock:
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
@@ -5056,6 +4995,8 @@ i915_gem_load(struct drm_device *dev)
        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        register_oom_notifier(&dev_priv->mm.oom_notifier);
 
+       i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
+
        mutex_init(&dev_priv->fb_tracking.lock);
 }
 
@@ -5216,8 +5157,9 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 }
 
 /* All the new VM stuff */
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
-                                 struct i915_address_space *vm)
+unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+                                      struct i915_address_space *vm,
+                                      enum i915_ggtt_view_type view)
 {
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
@@ -5225,7 +5167,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
        list_for_each_entry(vma, &o->vma_list, vma_link) {
-               if (vma->vm == vm)
+               if (vma->vm == vm && vma->ggtt_view.type == view)
                        return vma->node.start;
 
        }
@@ -5234,13 +5176,16 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
        return -1;
 }
 
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-                       struct i915_address_space *vm)
+bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+                            struct i915_address_space *vm,
+                            enum i915_ggtt_view_type view)
 {
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, vma_link)
-               if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+               if (vma->vm == vm &&
+                   vma->ggtt_view.type == view &&
+                   drm_mm_node_allocated(&vma->node))
                        return true;
 
        return false;
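
The pre-existing, view-less helpers are expected to survive as thin wrappers that pass I915_GGTT_VIEW_NORMAL, so the many existing callers need no changes. For example (a sketch; the real wrappers would live alongside the declarations):

static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
                                      struct i915_address_space *vm)
{
        return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
}

static inline unsigned long
i915_gem_obj_offset(struct drm_i915_gem_object *o,
                    struct i915_address_space *vm)
{
        return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
}
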
@@ -5372,11 +5317,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 
 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
+       struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
        struct i915_vma *vma;
 
-       vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-       if (vma->vm != i915_obj_to_ggtt(obj))
-               return NULL;
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->vm == ggtt &&
+                   vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+                       return vma;
 
-       return vma;
+       return NULL;
 }