diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a09aae7..9f445e9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
                goto out;
        }
 
-       if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
-               i915_kernel_lost_context(ring->dev);
-       else {
-               ringbuf->head = I915_READ_HEAD(ring);
-               ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-               ringbuf->space = intel_ring_space(ringbuf);
-               ringbuf->last_retired_head = -1;
-       }
+       ringbuf->head = I915_READ_HEAD(ring);
+       ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+       ringbuf->space = intel_ring_space(ringbuf);
+       ringbuf->last_retired_head = -1;
 
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
@@ -665,7 +661,8 @@ err:
        return ret;
 }
 
-static int intel_ring_workarounds_emit(struct intel_engine_cs *ring)
+static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
+                                      struct intel_context *ctx)
 {
        int ret, i;
        struct drm_device *dev = ring->dev;
@@ -704,7 +701,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring)
 }
 
 static int wa_add(struct drm_i915_private *dev_priv,
-                 const u32 addr, const u32 val, const u32 mask)
+                 const u32 addr, const u32 mask, const u32 val)
 {
        const u32 idx = dev_priv->workarounds.count;
 
@@ -720,22 +717,25 @@ static int wa_add(struct drm_i915_private *dev_priv,
        return 0;
 }
 
-#define WA_REG(addr, val, mask) { \
-               const int r = wa_add(dev_priv, (addr), (val), (mask)); \
+#define WA_REG(addr, mask, val) { \
+               const int r = wa_add(dev_priv, (addr), (mask), (val)); \
                if (r) \
                        return r; \
        }
 
 #define WA_SET_BIT_MASKED(addr, mask) \
-       WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff)
+       WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
 
 #define WA_CLR_BIT_MASKED(addr, mask) \
-       WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff)
+       WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
 
-#define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask)
-#define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask)
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+       WA_REG(addr, mask, _MASKED_FIELD(mask, value))
 
-#define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff)
+#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
+
+#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
 
 static int bdw_init_workarounds(struct intel_engine_cs *ring)
 {
@@ -776,8 +776,9 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
-       WA_SET_BIT_MASKED(GEN7_GT_MODE,
-                         GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+       WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+                           GEN6_WIZ_HASHING_MASK,
+                           GEN6_WIZ_HASHING_16x4);
 
        return 0;
 }
@@ -806,7 +807,7 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
        return 0;
 }
 
-static int init_workarounds_ring(struct intel_engine_cs *ring)
+int init_workarounds_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1721,13 +1722,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
        return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-       if (!ringbuf->obj)
-               return;
-
        iounmap(ringbuf->virtual_start);
+       ringbuf->virtual_start = NULL;
        i915_gem_object_ggtt_unpin(ringbuf->obj);
+}
+
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+                                    struct intel_ringbuffer *ringbuf)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_gem_object *obj = ringbuf->obj;
+       int ret;
+
+       ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, true);
+       if (ret) {
+               i915_gem_object_ggtt_unpin(obj);
+               return ret;
+       }
+
+       ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+                       i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+       if (ringbuf->virtual_start == NULL) {
+               i915_gem_object_ggtt_unpin(obj);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
        drm_gem_object_unreference(&ringbuf->obj->base);
        ringbuf->obj = NULL;
 }
@@ -1735,12 +1765,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
                               struct intel_ringbuffer *ringbuf)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
-       int ret;
-
-       if (ringbuf->obj)
-               return 0;
 
        obj = NULL;
        if (!HAS_LLC(dev))
@@ -1753,30 +1778,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
        /* mark ring buffers as read-only from GPU side by default */
        obj->gt_ro = 1;
 
-       ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-       if (ret)
-               goto err_unref;
-
-       ret = i915_gem_object_set_to_gtt_domain(obj, true);
-       if (ret)
-               goto err_unpin;
-
-       ringbuf->virtual_start =
-               ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-                               ringbuf->size);
-       if (ringbuf->virtual_start == NULL) {
-               ret = -EINVAL;
-               goto err_unpin;
-       }
-
        ringbuf->obj = obj;
-       return 0;
 
-err_unpin:
-       i915_gem_object_ggtt_unpin(obj);
-err_unref:
-       drm_gem_object_unreference(&obj->base);
-       return ret;
+       return 0;
 }
 
 static int intel_init_ring_buffer(struct drm_device *dev,
@@ -1813,10 +1817,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                        goto error;
        }
 
-       ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-       if (ret) {
-               DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
-               goto error;
+       if (ringbuf->obj == NULL) {
+               ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+               if (ret) {
+                       DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+                                       ring->name, ret);
+                       goto error;
+               }
+
+               ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+               if (ret) {
+                       DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+                                       ring->name, ret);
+                       intel_destroy_ringbuffer_obj(ringbuf);
+                       goto error;
+               }
        }
 
        /* Workaround an erratum on the i830 which causes a hang if
@@ -1857,6 +1872,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
        intel_stop_ring_buffer(ring);
        WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
+       intel_unpin_ringbuffer_obj(ringbuf);
        intel_destroy_ringbuffer_obj(ringbuf);
        ring->preallocated_lazy_request = NULL;
        ring->outstanding_lazy_seqno = 0;
@@ -1942,13 +1958,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
                        break;
                }
 
-               if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
-                   dev->primary->master) {
-                       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-                       if (master_priv->sarea_priv)
-                               master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-               }
-
                msleep(1);
 
                if (dev_priv->mm.interruptible && signal_pending(current)) {
@@ -2439,91 +2448,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        return intel_init_ring_buffer(dev, ring);
 }
 
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
-       struct intel_ringbuffer *ringbuf = ring->buffer;
-       int ret;
-
-       if (ringbuf == NULL) {
-               ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-               if (!ringbuf)
-                       return -ENOMEM;
-               ring->buffer = ringbuf;
-       }
-
-       ring->name = "render ring";
-       ring->id = RCS;
-       ring->mmio_base = RENDER_RING_BASE;
-
-       if (INTEL_INFO(dev)->gen >= 6) {
-               /* non-kms not supported on gen6+ */
-               ret = -ENODEV;
-               goto err_ringbuf;
-       }
-
-       /* Note: gem is not supported on gen5/ilk without kms (the corresponding
-        * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
-        * the special gen5 functions. */
-       ring->add_request = i9xx_add_request;
-       if (INTEL_INFO(dev)->gen < 4)
-               ring->flush = gen2_render_ring_flush;
-       else
-               ring->flush = gen4_render_ring_flush;
-       ring->get_seqno = ring_get_seqno;
-       ring->set_seqno = ring_set_seqno;
-       if (IS_GEN2(dev)) {
-               ring->irq_get = i8xx_ring_get_irq;
-               ring->irq_put = i8xx_ring_put_irq;
-       } else {
-               ring->irq_get = i9xx_ring_get_irq;
-               ring->irq_put = i9xx_ring_put_irq;
-       }
-       ring->irq_enable_mask = I915_USER_INTERRUPT;
-       ring->write_tail = ring_write_tail;
-       if (INTEL_INFO(dev)->gen >= 4)
-               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
-       else if (IS_I830(dev) || IS_845G(dev))
-               ring->dispatch_execbuffer = i830_dispatch_execbuffer;
-       else
-               ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-       ring->init = init_render_ring;
-       ring->cleanup = render_ring_cleanup;
-
-       ring->dev = dev;
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-
-       ringbuf->size = size;
-       ringbuf->effective_size = ringbuf->size;
-       if (IS_I830(ring->dev) || IS_845G(ring->dev))
-               ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
-       ringbuf->virtual_start = ioremap_wc(start, size);
-       if (ringbuf->virtual_start == NULL) {
-               DRM_ERROR("can not ioremap virtual address for"
-                         " ring buffer\n");
-               ret = -ENOMEM;
-               goto err_ringbuf;
-       }
-
-       if (!I915_NEED_GFX_HWS(dev)) {
-               ret = init_phys_status_page(ring);
-               if (ret)
-                       goto err_vstart;
-       }
-
-       return 0;
-
-err_vstart:
-       iounmap(ringbuf->virtual_start);
-err_ringbuf:
-       kfree(ringbuf);
-       ring->buffer = NULL;
-       return ret;
-}
-
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;