Merge branch 'for-next' of git://git.samba.org/sfrench/cifs-2.6
[cascardo/linux.git] / drivers/gpu/drm/i915/intel_ringbuffer.c
index 6bc68bd..279488a 100644
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-static inline int ring_space(struct intel_ring_buffer *ring)
+/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some indication of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+
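Throughout the later hunks, scratch offsets that were hard-coded as 128 are respelled as 2 * CACHELINE_BYTES. A stand-alone sanity check of that equivalence (a sketch, compiled in userspace, not driver code):

#define CACHELINE_BYTES 64 /* as defined in the hunk above */

/* The PIPE_CONTROL scratch writes step in two-cacheline units, so the new
 * spelling is byte-for-byte identical to the old literal. */
_Static_assert(2 * CACHELINE_BYTES == 128,
               "2 * CACHELINE_BYTES matches the old hard-coded 128");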
+static inline int __ring_space(int head, int tail, int size)
 {
-       int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
+       int space = head - (tail + I915_RING_FREE_SPACE);
        if (space < 0)
-               space += ring->size;
+               space += size;
        return space;
 }
 
-void __intel_ring_advance(struct intel_ring_buffer *ring)
+static inline int ring_space(struct intel_engine_cs *ring)
+{
+       struct intel_ringbuffer *ringbuf = ring->buffer;
+       return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
+}
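A minimal userspace sketch of the arithmetic the new __ring_space() helper factors out, assuming the driver's I915_RING_FREE_SPACE reserve of 64 bytes and the default 32-page ring set up later in this patch; ring_space_demo() is a hypothetical stand-in for the helper:

#include <assert.h>

#define I915_RING_FREE_SPACE 64   /* assumed reserve, per the driver */

static int ring_space_demo(int head, int tail, int size)
{
        int space = head - (tail + I915_RING_FREE_SPACE);
        if (space < 0)
                space += size;    /* tail has wrapped past the end */
        return space;
}

int main(void)
{
        int size = 32 * 4096;     /* 32 pages, assuming 4 KiB pages */

        /* head ahead of tail: plain subtraction minus the reserve */
        assert(ring_space_demo(4096, 1024, size) == 4096 - 1024 - 64);
        /* tail ahead of head: the free gap wraps around the buffer end */
        assert(ring_space_demo(1024, 4096, size) == size - 3072 - 64);
        return 0;
}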
+
+static bool intel_ring_stopped(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+}
 
-       ring->tail &= ring->size - 1;
-       if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+void __intel_ring_advance(struct intel_engine_cs *ring)
+{
+       struct intel_ringbuffer *ringbuf = ring->buffer;
+       ringbuf->tail &= ringbuf->size - 1;
+       if (intel_ring_stopped(ring))
                return;
-       ring->write_tail(ring, ring->tail);
+       ring->write_tail(ring, ringbuf->tail);
 }
 
 static int
-gen2_render_ring_flush(struct intel_ring_buffer *ring,
+gen2_render_ring_flush(struct intel_engine_cs *ring,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
@@ -78,7 +96,7 @@ gen2_render_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-gen4_render_ring_flush(struct intel_ring_buffer *ring,
+gen4_render_ring_flush(struct intel_engine_cs *ring,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
@@ -173,9 +191,9 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
 {
-       u32 scratch_addr = ring->scratch.gtt_offset + 128;
+       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
 
@@ -208,11 +226,11 @@ intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
 }
 
 static int
-gen6_render_ring_flush(struct intel_ring_buffer *ring,
+gen6_render_ring_flush(struct intel_engine_cs *ring,
                          u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
-       u32 scratch_addr = ring->scratch.gtt_offset + 128;
+       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -260,7 +278,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
 {
        int ret;
 
@@ -278,7 +296,7 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
        return 0;
 }
 
-static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
+static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
 {
        int ret;
 
@@ -302,11 +320,11 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
 }
 
 static int
-gen7_render_ring_flush(struct intel_ring_buffer *ring,
+gen7_render_ring_flush(struct intel_engine_cs *ring,
                       u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
-       u32 scratch_addr = ring->scratch.gtt_offset + 128;
+       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /*
@@ -363,11 +381,11 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-gen8_render_ring_flush(struct intel_ring_buffer *ring,
+gen8_render_ring_flush(struct intel_engine_cs *ring,
                       u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
-       u32 scratch_addr = ring->scratch.gtt_offset + 128;
+       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        flags |= PIPE_CONTROL_CS_STALL;
@@ -403,14 +421,14 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring,
 
 }
 
-static void ring_write_tail(struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_engine_cs *ring,
                            u32 value)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        I915_WRITE_TAIL(ring, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u64 acthd;
@@ -426,7 +444,7 @@ u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
        return acthd;
 }
 
-static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u32 addr;
@@ -437,32 +455,42 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
        I915_WRITE(HWS_PGA, addr);
 }
 
-static int init_ring_common(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj = ring->obj;
-       int ret = 0;
-       u32 head;
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+       if (!IS_GEN2(ring->dev)) {
+               I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+               if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+                       DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
+                       return false;
+               }
+       }
 
-       /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);
-       if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
-               DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
 
-       if (I915_NEED_GFX_HWS(dev))
-               intel_ring_setup_status_page(ring);
-       else
-               ring_setup_phys_status_page(ring);
+       if (!IS_GEN2(ring->dev)) {
+               (void)I915_READ_CTL(ring);
+               I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+       }
+
+       return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
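stop_ring() leans on the masked-bit convention used by MI_MODE and friends: the high 16 bits of the written value select which of the low 16 bits the write actually changes. A hedged sketch of that idiom; the macro bodies mirror i915_reg.h of this era, masked_write() is a hypothetical model of the hardware behaviour, and the STOP_RING bit position is illustrative:

#include <stdint.h>
#include <assert.h>

#define _MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))  /* set bit + its mask */
#define _MASKED_BIT_DISABLE(a) ((a) << 16)          /* clear bit via mask */

#define STOP_RING (1 << 8)     /* illustrative bit position */

/* Model of a masked register write: only bits whose mask is set change. */
static uint32_t masked_write(uint32_t reg, uint32_t val)
{
        uint32_t mask = (val >> 16) & 0xffff;
        return (reg & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t mode = 0;
        mode = masked_write(mode, _MASKED_BIT_ENABLE(STOP_RING));
        assert(mode & STOP_RING);            /* ring asked to stop */
        mode = masked_write(mode, _MASKED_BIT_DISABLE(STOP_RING));
        assert(!(mode & STOP_RING));         /* and released again */
        return 0;
}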
 
-       head = I915_READ_HEAD(ring) & HEAD_ADDR;
+static int init_ring_common(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct drm_i915_gem_object *obj = ringbuf->obj;
+       int ret = 0;
 
-       /* G45 ring initialization fails to reset head to zero */
-       if (head != 0) {
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+       if (!stop_ring(ring)) {
+               /* G45 ring initialization often fails to reset head to zero */
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
@@ -471,9 +499,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));
 
-               I915_WRITE_HEAD(ring, 0);
-
-               if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+               if (!stop_ring(ring)) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
@@ -481,16 +507,23 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
+                       ret = -EIO;
+                       goto out;
                }
        }
 
+       if (I915_NEED_GFX_HWS(dev))
+               intel_ring_setup_status_page(ring);
+       else
+               ring_setup_phys_status_page(ring);
+
        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
        I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
        I915_WRITE_CTL(ring,
-                       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+                       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
@@ -498,12 +531,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
-                               "ctl %08x head %08x tail %08x start %08x\n",
-                               ring->name,
-                               I915_READ_CTL(ring),
-                               I915_READ_HEAD(ring),
-                               I915_READ_TAIL(ring),
-                               I915_READ_START(ring));
+                         "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
+                         ring->name,
+                         I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
+                         I915_READ_HEAD(ring), I915_READ_TAIL(ring),
+                         I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
                ret = -EIO;
                goto out;
        }
@@ -511,10 +543,10 @@ static int init_ring_common(struct intel_ring_buffer *ring)
        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                i915_kernel_lost_context(ring->dev);
        else {
-               ring->head = I915_READ_HEAD(ring);
-               ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-               ring->space = ring_space(ring);
-               ring->last_retired_head = -1;
+               ringbuf->head = I915_READ_HEAD(ring);
+               ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+               ringbuf->space = ring_space(ring);
+               ringbuf->last_retired_head = -1;
        }
 
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -526,7 +558,7 @@ out:
 }
 
 static int
-init_pipe_control(struct intel_ring_buffer *ring)
+init_pipe_control(struct intel_engine_cs *ring)
 {
        int ret;
 
@@ -567,7 +599,7 @@ err:
        return ret;
 }
 
-static int init_render_ring(struct intel_ring_buffer *ring)
+static int init_render_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -581,19 +613,21 @@ static int init_render_ring(struct intel_ring_buffer *ring)
         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
         * programmed to '1' on all products.
         *
-        * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
+        * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
         */
        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
        /* Required for the hardware to program scanline values for waiting */
+       /* WaEnableFlushTlbInvalidationMode:snb */
        if (INTEL_INFO(dev)->gen == 6)
                I915_WRITE(GFX_MODE,
-                          _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+                          _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
+       /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
        if (IS_GEN7(dev))
                I915_WRITE(GFX_MODE_GEN7,
-                          _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+                          _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
        if (INTEL_INFO(dev)->gen >= 5) {
@@ -610,13 +644,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
                 */
                I915_WRITE(CACHE_MODE_0,
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
-
-               /* This is not explicitly set for GEN6, so read the register.
-                * see intel_ring_mi_set_context() for why we care.
-                * TODO: consider explicitly setting the bit for GEN5
-                */
-               ring->itlb_before_ctx_switch =
-                       !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
        }
 
        if (INTEL_INFO(dev)->gen >= 6)
@@ -628,7 +655,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
        return ret;
 }
 
-static void render_ring_cleanup(struct intel_ring_buffer *ring)
+static void render_ring_cleanup(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
 
@@ -644,20 +671,46 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
        ring->scratch.obj = NULL;
 }
 
-static void
-update_mboxes(struct intel_ring_buffer *ring,
-             u32 mmio_offset)
+static int gen6_signal(struct intel_engine_cs *signaller,
+                      unsigned int num_dwords)
 {
-/* NB: In order to be able to do semaphore MBOX updates for varying number
- * of rings, it's easiest if we round up each individual update to a
- * multiple of 2 (since ring updates must always be a multiple of 2)
- * even though the actual update only requires 3 dwords.
- */
+       struct drm_device *dev = signaller->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *useless;
+       int i, ret;
+
+       /* NB: In order to be able to do semaphore MBOX updates for varying
+        * number of rings, it's easiest if we round up each individual update
+        * to a multiple of 2 (since ring updates must always be a multiple of
+        * 2) even though the actual update only requires 3 dwords.
+        */
 #define MBOX_UPDATE_DWORDS 4
-       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-       intel_ring_emit(ring, mmio_offset);
-       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
-       intel_ring_emit(ring, MI_NOOP);
+       if (i915_semaphore_is_enabled(dev))
+               num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+       else
+               return intel_ring_begin(signaller, num_dwords);
+
+       ret = intel_ring_begin(signaller, num_dwords);
+       if (ret)
+               return ret;
+#undef MBOX_UPDATE_DWORDS
+
+       for_each_ring(useless, dev_priv, i) {
+               u32 mbox_reg = signaller->semaphore.mbox.signal[i];
+               if (mbox_reg != GEN6_NOSYNC) {
+                       intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
+                       intel_ring_emit(signaller, mbox_reg);
+                       intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+                       intel_ring_emit(signaller, MI_NOOP);
+               } else {
+                       intel_ring_emit(signaller, MI_NOOP);
+                       intel_ring_emit(signaller, MI_NOOP);
+                       intel_ring_emit(signaller, MI_NOOP);
+                       intel_ring_emit(signaller, MI_NOOP);
+               }
+       }
+
+       return 0;
 }
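The rounding note in gen6_signal() is easiest to see with numbers: each mailbox signal is three useful dwords padded to MBOX_UPDATE_DWORDS = 4 so the total stays a multiple of 2. A sketch of the budget gen6_add_request() now reserves in a single intel_ring_begin() call, assuming I915_NUM_RINGS is 5 once VCS2 is added:

#include <assert.h>

#define I915_NUM_RINGS     5   /* assumed: RCS, VCS, BCS, VECS, VCS2 */
#define MBOX_UPDATE_DWORDS 4   /* 3 dwords of work, padded to 4 */

int main(void)
{
        int num_dwords = 4;    /* gen6_add_request()'s own emission */

        /* gen6_signal() grows the request by one mailbox update per
         * other ring before the single intel_ring_begin() call. */
        num_dwords += (I915_NUM_RINGS - 1) * MBOX_UPDATE_DWORDS;
        assert(num_dwords == 20);
        return 0;
}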
 
 /**
@@ -670,29 +723,14 @@ update_mboxes(struct intel_ring_buffer *ring,
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_ring_buffer *ring)
+gen6_add_request(struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *useless;
-       int i, ret, num_dwords = 4;
-
-       if (i915_semaphore_is_enabled(dev))
-               num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
-#undef MBOX_UPDATE_DWORDS
+       int ret;
 
-       ret = intel_ring_begin(ring, num_dwords);
+       ret = ring->semaphore.signal(ring, 4);
        if (ret)
                return ret;
 
-       if (i915_semaphore_is_enabled(dev)) {
-               for_each_ring(useless, dev_priv, i) {
-                       u32 mbox_reg = ring->signal_mbox[i];
-                       if (mbox_reg != GEN6_NOSYNC)
-                               update_mboxes(ring, mbox_reg);
-               }
-       }
-
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
@@ -717,14 +755,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
  * @seqno - seqno which the waiter will block on
  */
 static int
-gen6_ring_sync(struct intel_ring_buffer *waiter,
-              struct intel_ring_buffer *signaller,
+gen6_ring_sync(struct intel_engine_cs *waiter,
+              struct intel_engine_cs *signaller,
               u32 seqno)
 {
-       int ret;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
+       u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+       int ret;
 
        /* Throughout all of the GEM code, seqno passed implies our current
         * seqno is >= the last seqno executed. However for hardware the
@@ -732,8 +771,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
         */
        seqno -= 1;
 
-       WARN_ON(signaller->semaphore_register[waiter->id] ==
-               MI_SEMAPHORE_SYNC_INVALID);
+       WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
        ret = intel_ring_begin(waiter, 4);
        if (ret)
@@ -741,9 +779,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
 
        /* If seqno wrap happened, omit the wait with no-ops */
        if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
-               intel_ring_emit(waiter,
-                               dw1 |
-                               signaller->semaphore_register[waiter->id]);
+               intel_ring_emit(waiter, dw1 | wait_mbox);
                intel_ring_emit(waiter, seqno);
                intel_ring_emit(waiter, 0);
                intel_ring_emit(waiter, MI_NOOP);
@@ -768,9 +804,9 @@ do {                                                                        \
 } while (0)
 
 static int
-pc_render_add_request(struct intel_ring_buffer *ring)
+pc_render_add_request(struct intel_engine_cs *ring)
 {
-       u32 scratch_addr = ring->scratch.gtt_offset + 128;
+       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -792,15 +828,15 @@ pc_render_add_request(struct intel_ring_buffer *ring)
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 128; /* write to separate cachelines */
+       scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 128;
+       scratch_addr += 2 * CACHELINE_BYTES;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 128;
+       scratch_addr += 2 * CACHELINE_BYTES;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 128;
+       scratch_addr += 2 * CACHELINE_BYTES;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 128;
+       scratch_addr += 2 * CACHELINE_BYTES;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
 
        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
@@ -816,7 +852,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
 }
 
 static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
 {
        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
@@ -830,31 +866,31 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 }
 
 static u32
-ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
 {
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static void
-ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
        intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
 static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
 {
        return ring->scratch.cpu_page[0];
 }
 
 static void
-pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
        ring->scratch.cpu_page[0] = seqno;
 }
 
 static bool
-gen5_ring_get_irq(struct intel_ring_buffer *ring)
+gen5_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -872,7 +908,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 }
 
 static void
-gen5_ring_put_irq(struct intel_ring_buffer *ring)
+gen5_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -885,7 +921,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 }
 
 static bool
-i9xx_ring_get_irq(struct intel_ring_buffer *ring)
+i9xx_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -906,7 +942,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 }
 
 static void
-i9xx_ring_put_irq(struct intel_ring_buffer *ring)
+i9xx_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -922,7 +958,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 }
 
 static bool
-i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -943,7 +979,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 }
 
 static void
-i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -958,7 +994,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -975,6 +1011,11 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
                case BCS:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
+               /*
+                * VCS2 doesn't actually exist on Gen7; this case is only
+                * here to silence gcc's switch-check warning.
+                */
+               case VCS2:
                case VCS:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
@@ -1016,7 +1057,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 }
 
 static int
-bsd_ring_flush(struct intel_ring_buffer *ring,
+bsd_ring_flush(struct intel_engine_cs *ring,
               u32     invalidate_domains,
               u32     flush_domains)
 {
@@ -1033,7 +1074,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-i9xx_add_request(struct intel_ring_buffer *ring)
+i9xx_add_request(struct intel_engine_cs *ring)
 {
        int ret;
 
@@ -1051,7 +1092,7 @@ i9xx_add_request(struct intel_ring_buffer *ring)
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring)
+gen6_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1076,7 +1117,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring)
+gen6_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1094,7 +1135,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 }
 
 static bool
-hsw_vebox_get_irq(struct intel_ring_buffer *ring)
+hsw_vebox_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1114,7 +1155,7 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
 }
 
 static void
-hsw_vebox_put_irq(struct intel_ring_buffer *ring)
+hsw_vebox_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1132,7 +1173,7 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 }
 
 static bool
-gen8_ring_get_irq(struct intel_ring_buffer *ring)
+gen8_ring_get_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1158,7 +1199,7 @@ gen8_ring_get_irq(struct intel_ring_buffer *ring)
 }
 
 static void
-gen8_ring_put_irq(struct intel_ring_buffer *ring)
+gen8_ring_put_irq(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1178,8 +1219,8 @@ gen8_ring_put_irq(struct intel_ring_buffer *ring)
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                        u32 offset, u32 length,
+i965_dispatch_execbuffer(struct intel_engine_cs *ring,
+                        u64 offset, u32 length,
                         unsigned flags)
 {
        int ret;
@@ -1201,8 +1242,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
 #define I830_BATCH_LIMIT (256*1024)
 static int
-i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                               u32 offset, u32 len,
+i830_dispatch_execbuffer(struct intel_engine_cs *ring,
+                               u64 offset, u32 len,
                                unsigned flags)
 {
        int ret;
@@ -1252,8 +1293,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 }
 
 static int
-i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                        u32 offset, u32 len,
+i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+                        u64 offset, u32 len,
                         unsigned flags)
 {
        int ret;
@@ -1269,7 +1310,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static void cleanup_status_page(struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_object *obj;
 
@@ -1283,50 +1324,44 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
        ring->status_page.obj = NULL;
 }
 
-static int init_status_page(struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *obj;
-       int ret;
 
-       obj = i915_gem_alloc_object(dev, 4096);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate status page\n");
-               ret = -ENOMEM;
-               goto err;
-       }
+       if ((obj = ring->status_page.obj) == NULL) {
+               int ret;
 
-       ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-       if (ret)
-               goto err_unref;
+               obj = i915_gem_alloc_object(ring->dev, 4096);
+               if (obj == NULL) {
+                       DRM_ERROR("Failed to allocate status page\n");
+                       return -ENOMEM;
+               }
 
-       ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
-       if (ret)
-               goto err_unref;
+               ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+               if (ret)
+                       goto err_unref;
+
+               ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+               if (ret) {
+err_unref:
+                       drm_gem_object_unreference(&obj->base);
+                       return ret;
+               }
+
+               ring->status_page.obj = obj;
+       }
 
        ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-       if (ring->status_page.page_addr == NULL) {
-               ret = -ENOMEM;
-               goto err_unpin;
-       }
-       ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);
 
        return 0;
-
-err_unpin:
-       i915_gem_object_ggtt_unpin(obj);
-err_unref:
-       drm_gem_object_unreference(&obj->base);
-err:
-       return ret;
 }
 
-static int init_phys_status_page(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
@@ -1343,44 +1378,24 @@ static int init_phys_status_page(struct intel_ring_buffer *ring)
        return 0;
 }
 
-static int intel_init_ring_buffer(struct drm_device *dev,
-                                 struct intel_ring_buffer *ring)
+static int allocate_ring_buffer(struct intel_engine_cs *ring)
 {
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_ringbuffer *ringbuf = ring->buffer;
        struct drm_i915_gem_object *obj;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ring->dev = dev;
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-       ring->size = 32 * PAGE_SIZE;
-       memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
-
-       init_waitqueue_head(&ring->irq_queue);
-
-       if (I915_NEED_GFX_HWS(dev)) {
-               ret = init_status_page(ring);
-               if (ret)
-                       return ret;
-       } else {
-               BUG_ON(ring->id != RCS);
-               ret = init_phys_status_page(ring);
-               if (ret)
-                       return ret;
-       }
+       if (intel_ring_initialized(ring))
+               return 0;
 
        obj = NULL;
        if (!HAS_LLC(dev))
-               obj = i915_gem_object_create_stolen(dev, ring->size);
+               obj = i915_gem_object_create_stolen(dev, ringbuf->size);
        if (obj == NULL)
-               obj = i915_gem_alloc_object(dev, ring->size);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate ringbuffer\n");
-               ret = -ENOMEM;
-               goto err_hws;
-       }
-
-       ring->obj = obj;
+               obj = i915_gem_alloc_object(dev, ringbuf->size);
+       if (obj == NULL)
+               return -ENOMEM;
 
        ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
        if (ret)
@@ -1390,65 +1405,102 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        if (ret)
                goto err_unpin;
 
-       ring->virtual_start =
+       ringbuf->virtual_start =
                ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-                          ring->size);
-       if (ring->virtual_start == NULL) {
-               DRM_ERROR("Failed to map ringbuffer.\n");
+                               ringbuf->size);
+       if (ringbuf->virtual_start == NULL) {
                ret = -EINVAL;
                goto err_unpin;
        }
 
-       ret = ring->init(ring);
-       if (ret)
-               goto err_unmap;
+       ringbuf->obj = obj;
+       return 0;
+
+err_unpin:
+       i915_gem_object_ggtt_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(&obj->base);
+       return ret;
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+                                 struct intel_engine_cs *ring)
+{
+       struct intel_ringbuffer *ringbuf = ring->buffer;
+       int ret;
+
+       if (ringbuf == NULL) {
+               ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+               if (!ringbuf)
+                       return -ENOMEM;
+               ring->buffer = ringbuf;
+       }
+
+       ring->dev = dev;
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       ringbuf->size = 32 * PAGE_SIZE;
+       memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
+
+       init_waitqueue_head(&ring->irq_queue);
+
+       if (I915_NEED_GFX_HWS(dev)) {
+               ret = init_status_page(ring);
+               if (ret)
+                       goto error;
+       } else {
+               BUG_ON(ring->id != RCS);
+               ret = init_phys_status_page(ring);
+               if (ret)
+                       goto error;
+       }
+
+       ret = allocate_ring_buffer(ring);
+       if (ret) {
+               DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
+               goto error;
+       }
 
        /* Workaround an erratum on the i830 which causes a hang if
         * the TAIL pointer points to within the last 2 cachelines
         * of the buffer.
         */
-       ring->effective_size = ring->size;
-       if (IS_I830(ring->dev) || IS_845G(ring->dev))
-               ring->effective_size -= 128;
+       ringbuf->effective_size = ringbuf->size;
+       if (IS_I830(dev) || IS_845G(dev))
+               ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
-       i915_cmd_parser_init_ring(ring);
+       ret = i915_cmd_parser_init_ring(ring);
+       if (ret)
+               goto error;
+
+       ret = ring->init(ring);
+       if (ret)
+               goto error;
 
        return 0;
 
-err_unmap:
-       iounmap(ring->virtual_start);
-err_unpin:
-       i915_gem_object_ggtt_unpin(obj);
-err_unref:
-       drm_gem_object_unreference(&obj->base);
-       ring->obj = NULL;
-err_hws:
-       cleanup_status_page(ring);
+error:
+       kfree(ringbuf);
+       ring->buffer = NULL;
        return ret;
 }
 
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
-       struct drm_i915_private *dev_priv;
-       int ret;
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct intel_ringbuffer *ringbuf = ring->buffer;
 
-       if (ring->obj == NULL)
+       if (!intel_ring_initialized(ring))
                return;
 
-       /* Disable the ring buffer. The ring must be idle at this point */
-       dev_priv = ring->dev->dev_private;
-       ret = intel_ring_idle(ring);
-       if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
-               DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-                         ring->name, ret);
-
-       I915_WRITE_CTL(ring, 0);
+       intel_stop_ring_buffer(ring);
+       WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-       iounmap(ring->virtual_start);
+       iounmap(ringbuf->virtual_start);
 
-       i915_gem_object_ggtt_unpin(ring->obj);
-       drm_gem_object_unreference(&ring->obj->base);
-       ring->obj = NULL;
+       i915_gem_object_ggtt_unpin(ringbuf->obj);
+       drm_gem_object_unreference(&ringbuf->obj->base);
+       ringbuf->obj = NULL;
        ring->preallocated_lazy_request = NULL;
        ring->outstanding_lazy_seqno = 0;
 
@@ -1456,44 +1508,34 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
                ring->cleanup(ring);
 
        cleanup_status_page(ring);
+
+       i915_cmd_parser_fini_ring(ring);
+
+       kfree(ringbuf);
+       ring->buffer = NULL;
 }
 
-static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 {
+       struct intel_ringbuffer *ringbuf = ring->buffer;
        struct drm_i915_gem_request *request;
-       u32 seqno = 0, tail;
+       u32 seqno = 0;
        int ret;
 
-       if (ring->last_retired_head != -1) {
-               ring->head = ring->last_retired_head;
-               ring->last_retired_head = -1;
+       if (ringbuf->last_retired_head != -1) {
+               ringbuf->head = ringbuf->last_retired_head;
+               ringbuf->last_retired_head = -1;
 
-               ring->space = ring_space(ring);
-               if (ring->space >= n)
+               ringbuf->space = ring_space(ring);
+               if (ringbuf->space >= n)
                        return 0;
        }
 
        list_for_each_entry(request, &ring->request_list, list) {
-               int space;
-
-               if (request->tail == -1)
-                       continue;
-
-               space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
-               if (space < 0)
-                       space += ring->size;
-               if (space >= n) {
+               if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
                        seqno = request->seqno;
-                       tail = request->tail;
                        break;
                }
-
-               /* Consume this request in case we need more space than
-                * is available and so need to prevent a race between
-                * updating last_retired_head and direct reads of
-                * I915_RING_HEAD. It also provides a nice sanity check.
-                */
-               request->tail = -1;
        }
 
        if (seqno == 0)
@@ -1503,18 +1545,19 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
        if (ret)
                return ret;
 
-       ring->head = tail;
-       ring->space = ring_space(ring);
-       if (WARN_ON(ring->space < n))
-               return -ENOSPC;
+       i915_gem_retire_requests_ring(ring);
+       ringbuf->head = ringbuf->last_retired_head;
+       ringbuf->last_retired_head = -1;
 
+       ringbuf->space = ring_space(ring);
        return 0;
 }
 
-static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ringbuffer *ringbuf = ring->buffer;
        unsigned long end;
        int ret;
 
@@ -1525,7 +1568,6 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
        /* force the tail write in case we have been skipping them */
        __intel_ring_advance(ring);
 
-       trace_i915_ring_wait_begin(ring);
        /* With GEM the hangcheck timer should kick us out of the loop,
         * leaving it early runs the risk of corrupting GEM state (due
         * to running on almost untested codepaths). But on resume
@@ -1533,12 +1575,13 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
         * case by choosing an insanely large timeout. */
        end = jiffies + 60 * HZ;
 
+       trace_i915_ring_wait_begin(ring);
        do {
-               ring->head = I915_READ_HEAD(ring);
-               ring->space = ring_space(ring);
-               if (ring->space >= n) {
-                       trace_i915_ring_wait_end(ring);
-                       return 0;
+               ringbuf->head = I915_READ_HEAD(ring);
+               ringbuf->space = ring_space(ring);
+               if (ringbuf->space >= n) {
+                       ret = 0;
+                       break;
                }
 
                if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
@@ -1550,38 +1593,49 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 
                msleep(1);
 
+               if (dev_priv->mm.interruptible && signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+
                ret = i915_gem_check_wedge(&dev_priv->gpu_error,
                                           dev_priv->mm.interruptible);
                if (ret)
-                       return ret;
-       } while (!time_after(jiffies, end));
+                       break;
+
+               if (time_after(jiffies, end)) {
+                       ret = -EBUSY;
+                       break;
+               }
+       } while (1);
        trace_i915_ring_wait_end(ring);
-       return -EBUSY;
+       return ret;
 }
 
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
        uint32_t __iomem *virt;
-       int rem = ring->size - ring->tail;
+       struct intel_ringbuffer *ringbuf = ring->buffer;
+       int rem = ringbuf->size - ringbuf->tail;
 
-       if (ring->space < rem) {
+       if (ringbuf->space < rem) {
                int ret = ring_wait_for_space(ring, rem);
                if (ret)
                        return ret;
        }
 
-       virt = ring->virtual_start + ring->tail;
+       virt = ringbuf->virtual_start + ringbuf->tail;
        rem /= 4;
        while (rem--)
                iowrite32(MI_NOOP, virt++);
 
-       ring->tail = 0;
-       ring->space = ring_space(ring);
+       ringbuf->tail = 0;
+       ringbuf->space = ring_space(ring);
 
        return 0;
 }
 
-int intel_ring_idle(struct intel_ring_buffer *ring)
+int intel_ring_idle(struct intel_engine_cs *ring)
 {
        u32 seqno;
        int ret;
@@ -1605,7 +1659,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
 }
 
 static int
-intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+intel_ring_alloc_seqno(struct intel_engine_cs *ring)
 {
        if (ring->outstanding_lazy_seqno)
                return 0;
@@ -1623,18 +1677,19 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
-static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+static int __intel_ring_prepare(struct intel_engine_cs *ring,
                                int bytes)
 {
+       struct intel_ringbuffer *ringbuf = ring->buffer;
        int ret;
 
-       if (unlikely(ring->tail + bytes > ring->effective_size)) {
+       if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
                ret = intel_wrap_ring_buffer(ring);
                if (unlikely(ret))
                        return ret;
        }
 
-       if (unlikely(ring->space < bytes)) {
+       if (unlikely(ringbuf->space < bytes)) {
                ret = ring_wait_for_space(ring, bytes);
                if (unlikely(ret))
                        return ret;
@@ -1643,7 +1698,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring,
        return 0;
 }
 
-int intel_ring_begin(struct intel_ring_buffer *ring,
+int intel_ring_begin(struct intel_engine_cs *ring,
                     int num_dwords)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1663,19 +1718,20 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
-       ring->space -= num_dwords * sizeof(uint32_t);
+       ring->buffer->space -= num_dwords * sizeof(uint32_t);
        return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 {
-       int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+       int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        int ret;
 
        if (num_dwords == 0)
                return 0;
 
+       num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
        ret = intel_ring_begin(ring, num_dwords);
        if (ret)
                return ret;
@@ -1688,7 +1744,7 @@ int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
        return 0;
 }
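The rewritten alignment math above also fixes a quiet inefficiency: the old expression (64 - (ring->tail & 63)) / sizeof(uint32_t) evaluates to 16 when the tail is already cacheline-aligned, so a full cacheline of MI_NOOPs was emitted for nothing. A sketch contrasting the two formulas (old_pad() and new_pad() are hypothetical names):

#include <assert.h>

#define CACHELINE_BYTES 64

static int old_pad(int tail)   /* pre-patch formula */
{
        return (64 - (tail & 63)) / 4;
}

static int new_pad(int tail)   /* post-patch formula */
{
        int num_dwords = (tail & (CACHELINE_BYTES - 1)) / 4;

        if (num_dwords == 0)
                return 0;
        return CACHELINE_BYTES / 4 - num_dwords;
}

int main(void)
{
        assert(old_pad(0) == 16);   /* wasted a whole cacheline of NOOPs */
        assert(new_pad(0) == 0);    /* already aligned: emit nothing */
        assert(old_pad(8) == 14 && new_pad(8) == 14);  /* otherwise equal */
        return 0;
}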
 
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
@@ -1705,7 +1761,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
        ring->hangcheck.seqno = seqno;
 }
 
-static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
                                     u32 value)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1738,7 +1794,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
-static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
                               u32 invalidate, u32 flush)
 {
        uint32_t cmd;
@@ -1774,8 +1830,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                             u32 offset, u32 len,
+gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+                             u64 offset, u32 len,
                              unsigned flags)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1789,8 +1845,8 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
        /* FIXME(BDW): Address space and security selectors. */
        intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
-       intel_ring_emit(ring, offset);
-       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, lower_32_bits(offset));
+       intel_ring_emit(ring, upper_32_bits(offset));
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
 
@@ -1798,8 +1854,8 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 }
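With the execbuffer offset widened to u64 for gen8, MI_BATCH_BUFFER_START_GEN8 now takes the address as two dwords, emitted via the kernel's lower_32_bits()/upper_32_bits() helpers. A sketch with equivalent macro definitions and a hypothetical offset:

#include <stdint.h>
#include <assert.h>

/* Equivalent to the helpers in include/linux/kernel.h. */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
        uint64_t offset = 0x0000000180004000ull;  /* hypothetical GTT address */

        assert(lower_32_bits(offset) == 0x80004000u);  /* first emitted dword */
        assert(upper_32_bits(offset) == 0x00000001u);  /* second emitted dword */
        return 0;
}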
 
 static int
-hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                             u32 offset, u32 len,
+hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+                             u64 offset, u32 len,
                              unsigned flags)
 {
        int ret;
@@ -1819,8 +1875,8 @@ hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                             u32 offset, u32 len,
+gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+                             u64 offset, u32 len,
                              unsigned flags)
 {
        int ret;
@@ -1841,7 +1897,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_ring_flush(struct intel_engine_cs *ring,
                           u32 invalidate, u32 flush)
 {
        struct drm_device *dev = ring->dev;
@@ -1884,7 +1940,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 
        ring->name = "render ring";
        ring->id = RCS;
@@ -1906,15 +1962,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
                ring->get_seqno = gen6_ring_get_seqno;
                ring->set_seqno = ring_set_seqno;
-               ring->sync_to = gen6_ring_sync;
-               ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-               ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
-               ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
-               ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
-               ring->signal_mbox[RCS] = GEN6_NOSYNC;
-               ring->signal_mbox[VCS] = GEN6_VRSYNC;
-               ring->signal_mbox[BCS] = GEN6_BRSYNC;
-               ring->signal_mbox[VECS] = GEN6_VERSYNC;
+               ring->semaphore.sync_to = gen6_ring_sync;
+               ring->semaphore.signal = gen6_signal;
+               /*
+                * Semaphores are only used on pre-gen8 platforms, and no
+                * pre-gen8 platform has a VCS2 ring, so the semaphore
+                * between RCS and VCS2 is initialized as INVALID.
+                * Gen8 will initialize the semaphore between VCS2 and RCS
+                * later.
+                */
+               ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+               ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+               ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+               ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+               ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+               ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+               ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+               ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+               ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+               ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->flush = gen4_render_ring_flush;
@@ -1985,16 +2050,25 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+       struct intel_ringbuffer *ringbuf = ring->buffer;
        int ret;
 
+       if (ringbuf == NULL) {
+               ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+               if (!ringbuf)
+                       return -ENOMEM;
+               ring->buffer = ringbuf;
+       }
+
        ring->name = "render ring";
        ring->id = RCS;
        ring->mmio_base = RENDER_RING_BASE;
 
        if (INTEL_INFO(dev)->gen >= 6) {
                /* non-kms not supported on gen6+ */
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_ringbuf;
        }
 
        /* Note: gem is not supported on gen5/ilk without kms (the corresponding
@@ -2029,31 +2103,39 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
 
-       ring->size = size;
-       ring->effective_size = ring->size;
+       ringbuf->size = size;
+       ringbuf->effective_size = ringbuf->size;
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
-               ring->effective_size -= 128;
+               ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
-       ring->virtual_start = ioremap_wc(start, size);
-       if (ring->virtual_start == NULL) {
+       ringbuf->virtual_start = ioremap_wc(start, size);
+       if (ringbuf->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_ringbuf;
        }
 
        if (!I915_NEED_GFX_HWS(dev)) {
                ret = init_phys_status_page(ring);
                if (ret)
-                       return ret;
+                       goto err_vstart;
        }
 
        return 0;
+
+err_vstart:
+       iounmap(ringbuf->virtual_start);
+err_ringbuf:
+       kfree(ringbuf);
+       ring->buffer = NULL;
+       return ret;
 }
 
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[VCS];
 
        ring->name = "bsd ring";
        ring->id = VCS;
@@ -2082,15 +2164,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
                        ring->dispatch_execbuffer =
                                gen6_ring_dispatch_execbuffer;
                }
-               ring->sync_to = gen6_ring_sync;
-               ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
-               ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-               ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
-               ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
-               ring->signal_mbox[RCS] = GEN6_RVSYNC;
-               ring->signal_mbox[VCS] = GEN6_NOSYNC;
-               ring->signal_mbox[BCS] = GEN6_BVSYNC;
-               ring->signal_mbox[VECS] = GEN6_VEVSYNC;
+               ring->semaphore.sync_to = gen6_ring_sync;
+               ring->semaphore.signal = gen6_signal;
+               /*
+                * Semaphores are only used on pre-gen8 platforms, and no
+                * pre-gen8 platform has a VCS2 ring, so the semaphore
+                * between VCS and VCS2 is initialized as INVALID.
+                * Gen8 will initialize the semaphore between VCS2 and VCS
+                * later.
+                */
+               ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+               ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+               ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+               ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+               ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+               ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+               ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+               ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+               ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+               ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
        } else {
                ring->mmio_base = BSD_RING_BASE;
                ring->flush = bsd_ring_flush;
@@ -2113,10 +2204,63 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
        return intel_init_ring_buffer(dev, ring);
 }
 
+/**
+ * Initialize the second BSD ring (VCS2).
+ * Note that this ring only exists on Broadwell GT3.
+ */
+int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+
+       if (INTEL_INFO(dev)->gen != 8) {
+               DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
+               return -EINVAL;
+       }
+
+       ring->name = "bsd2 ring";
+       ring->id = VCS2;
+
+       ring->write_tail = ring_write_tail;
+       ring->mmio_base = GEN8_BSD2_RING_BASE;
+       ring->flush = gen6_bsd_ring_flush;
+       ring->add_request = gen6_add_request;
+       ring->get_seqno = gen6_ring_get_seqno;
+       ring->set_seqno = ring_set_seqno;
+       ring->irq_enable_mask =
+                       GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+       ring->irq_get = gen8_ring_get_irq;
+       ring->irq_put = gen8_ring_put_irq;
+       ring->dispatch_execbuffer =
+                       gen8_ring_dispatch_execbuffer;
+       ring->semaphore.sync_to = gen6_ring_sync;
+       ring->semaphore.signal = gen6_signal;
+       /*
+        * Semaphores are only used on pre-gen8 platforms, and no pre-gen8
+        * platform has a bsd2 ring, so the semaphore registers between
+        * VCS2 and the other rings are initialized as invalid.
+        * Gen8 will initialize the semaphores between VCS2 and the other
+        * rings later.
+        */
+       ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+       ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+       ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+       ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+       ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+
+       ring->init = init_ring_common;
+
+       return intel_init_ring_buffer(dev, ring);
+}
+
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+       struct intel_engine_cs *ring = &dev_priv->ring[BCS];
 
        ring->name = "blitter ring";
        ring->id = BCS;
@@ -2139,15 +2283,24 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
                ring->irq_put = gen6_ring_put_irq;
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        }
-       ring->sync_to = gen6_ring_sync;
-       ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
-       ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
-       ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-       ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
-       ring->signal_mbox[RCS] = GEN6_RBSYNC;
-       ring->signal_mbox[VCS] = GEN6_VBSYNC;
-       ring->signal_mbox[BCS] = GEN6_NOSYNC;
-       ring->signal_mbox[VECS] = GEN6_VEBSYNC;
+       ring->semaphore.sync_to = gen6_ring_sync;
+       ring->semaphore.signal = gen6_signal;
+       /*
+        * Semaphores are only used on pre-gen8 platforms, and no pre-gen8
+        * platform has a VCS2 ring, so the semaphore between BCS and VCS2
+        * is initialized as INVALID.
+        * Gen8 will initialize the semaphore between BCS and VCS2 later.
+        */
+       ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+       ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+       ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+       ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+       ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+       ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+       ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+       ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
        ring->init = init_ring_common;
 
        return intel_init_ring_buffer(dev, ring);
@@ -2156,7 +2309,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 int intel_init_vebox_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
+       struct intel_engine_cs *ring = &dev_priv->ring[VECS];
 
        ring->name = "video enhancement ring";
        ring->id = VECS;
@@ -2180,22 +2333,25 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
                ring->irq_put = hsw_vebox_put_irq;
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        }
-       ring->sync_to = gen6_ring_sync;
-       ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
-       ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
-       ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
-       ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-       ring->signal_mbox[RCS] = GEN6_RVESYNC;
-       ring->signal_mbox[VCS] = GEN6_VVESYNC;
-       ring->signal_mbox[BCS] = GEN6_BVESYNC;
-       ring->signal_mbox[VECS] = GEN6_NOSYNC;
+       ring->semaphore.sync_to = gen6_ring_sync;
+       ring->semaphore.signal = gen6_signal;
+       ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+       ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+       ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+       ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+       ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+       ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+       ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+       ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+       ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
        ring->init = init_ring_common;
 
        return intel_init_ring_buffer(dev, ring);
 }
 
 int
-intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+intel_ring_flush_all_caches(struct intel_engine_cs *ring)
 {
        int ret;
 
@@ -2213,7 +2369,7 @@ intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
 }
 
 int
-intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
 {
        uint32_t flush_domains;
        int ret;
@@ -2231,3 +2387,19 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
        ring->gpu_caches_dirty = false;
        return 0;
 }
+
+void
+intel_stop_ring_buffer(struct intel_engine_cs *ring)
+{
+       int ret;
+
+       if (!intel_ring_initialized(ring))
+               return;
+
+       ret = intel_ring_idle(ring);
+       if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+               DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+                         ring->name, ret);
+
+       stop_ring(ring);
+}