drm/gem: Warn on illegal use of the dumb buffer interface v2
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28f91df..50b8422 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -346,6 +346,7 @@ static int
 i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
+               bool dumb,
                uint32_t *handle_p)
 {
        struct drm_i915_gem_object *obj;
@@ -361,6 +362,7 @@ i915_gem_create(struct drm_file *file,
        if (obj == NULL)
                return -ENOMEM;
 
+       obj->base.dumb = dumb;
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
@@ -380,7 +382,7 @@ i915_gem_dumb_create(struct drm_file *file,
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
-                              args->size, &args->handle);
+                              args->size, true, &args->handle);
 }
 
 /**
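[The dumb-create path above is what userspace reaches through the generic
DRM_IOCTL_MODE_CREATE_DUMB ioctl. A minimal sketch, assuming fd is an
already-open DRM device node and with error handling omitted:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>
    #include <drm/drm_mode.h>

    struct drm_mode_create_dumb create;
    memset(&create, 0, sizeof(create));
    create.width = 1920;
    create.height = 1080;
    create.bpp = 32;        /* e.g. XRGB8888 */
    ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
    /* On return the kernel has filled in create.handle and, per
     * i915_gem_dumb_create() above, create.pitch =
     * ALIGN(1920 * 4, 64) = 7680 and create.size = 7680 * 1080. */
]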
@@ -393,7 +395,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_create *args = data;
 
        return i915_gem_create(file, dev,
-                              args->size, &args->handle);
+                              args->size, false, &args->handle);
 }
 
 static inline int
@@ -1134,7 +1136,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 }
 
 /**
- * __wait_seqno - wait until execution of seqno has finished
+ * __i915_wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
  * @seqno: the sequence number we are waiting on
  * @reset_counter: reset sequence associated with the given seqno
@@ -1151,7 +1153,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
  * Returns 0 if the seqno was found within the allotted time. Otherwise returns
  * the errno, with the remaining time filled in the timeout argument.
  */
-static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                        unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
@@ -1262,6 +1264,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible = dev_priv->mm.interruptible;
+       unsigned reset_counter;
        int ret;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1275,14 +1278,13 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
        if (ret)
                return ret;
 
-       return __wait_seqno(ring, seqno,
-                           atomic_read(&dev_priv->gpu_error.reset_counter),
-                           interruptible, NULL, NULL);
+       reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+       return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
+                                NULL, NULL);
 }
 
 static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
-                                    struct intel_engine_cs *ring)
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
 {
        if (!obj->active)
                return 0;
@@ -1319,7 +1321,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       return i915_gem_object_wait_rendering__tail(obj, ring);
+       return i915_gem_object_wait_rendering__tail(obj);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1354,12 +1356,13 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+       ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
+                               file_priv);
        mutex_lock(&dev->struct_mutex);
        if (ret)
                return ret;
 
-       return i915_gem_object_wait_rendering__tail(obj, ring);
+       return i915_gem_object_wait_rendering__tail(obj);
 }
 
 /**
@@ -1466,6 +1469,16 @@ unlock:
  *
  * While the mapping holds a reference on the contents of the object, it doesn't
  * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example of how to do GEM
+ * mmap support, please don't implement mmap support like this. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on;
+ * hiding the mmap call in a driver-private ioctl breaks that. The i915 driver
+ * only does cpu mmaps this way because we didn't know better.
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
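[To make the recommendation in the comment above concrete, here is a minimal
sketch of the preferred mmap-offset flow for a dumb buffer. handle, size and
fd are assumptions: a handle from DRM_IOCTL_MODE_CREATE_DUMB, its buffer
size, and an open DRM device node; error handling is omitted:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/drm.h>
    #include <drm/drm_mode.h>

    struct drm_mode_map_dumb map;
    memset(&map, 0, sizeof(map));
    map.handle = handle;
    ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
    /* mmap() the DRM fd itself; map.offset is only a lookup token.
     * Because this is an ordinary mmap syscall, tools like valgrind
     * can track the mapping, unlike a pointer handed back by a
     * driver-private ioctl such as i915_gem_mmap_ioctl() below. */
    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, map.offset);
]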
@@ -1762,10 +1775,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
        drm_gem_free_mmap_offset(&obj->base);
 }
 
-int
+static int
 i915_gem_mmap_gtt(struct drm_file *file,
                  struct drm_device *dev,
-                 uint32_t handle,
+                 uint32_t handle, bool dumb,
                  uint64_t *offset)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1782,6 +1795,13 @@ i915_gem_mmap_gtt(struct drm_file *file,
                goto unlock;
        }
 
+       /*
+        * We don't allow dumb mmaps on objects created using another
+        * interface.
+        */
+       WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
+                 "Illegal dumb map of accelerated buffer.\n");
+
        if (obj->base.size > dev_priv->gtt.mappable_end) {
                ret = -E2BIG;
                goto out;
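[Roughly, the usage pattern the new WARN_ONCE flags is a hypothetical client
mixing the two interfaces (handle and fd are assumptions as before):

    /* handle was made with the accelerated DRM_IOCTL_I915_GEM_CREATE
     * ioctl, so obj->base.dumb is false ... */
    struct drm_mode_map_dumb map = { .handle = handle };
    /* ... and this dumb map now triggers "Illegal dumb map of
     * accelerated buffer." above (prime-imported objects with
     * import_attach set remain exempt). */
    ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
]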
@@ -1806,6 +1826,15 @@ unlock:
        return ret;
 }
 
+int
+i915_gem_dumb_map_offset(struct drm_file *file,
+                        struct drm_device *dev,
+                        uint32_t handle,
+                        uint64_t *offset)
+{
+       return i915_gem_mmap_gtt(file, dev, handle, true, offset);
+}
+
 /**
  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
@@ -1827,7 +1856,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_mmap_gtt *args = data;
 
-       return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+       return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
 }
 
 static inline int
@@ -1945,7 +1974,14 @@ unsigned long
 i915_gem_shrink(struct drm_i915_private *dev_priv,
                long target, unsigned flags)
 {
-       const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
+       const struct {
+               struct list_head *list;
+               unsigned int bit;
+       } phases[] = {
+               { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+               { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+               { NULL, 0 },
+       }, *phase;
        unsigned long count = 0;
 
        /*
@@ -1967,48 +2003,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
         * dev->struct_mutex and so we won't ever be able to observe an
  * object on the bound_list with a reference count of 0.
         */
-       if (flags & I915_SHRINK_UNBOUND) {
+       for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
 
-               INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-                       struct drm_i915_gem_object *obj;
-
-                       obj = list_first_entry(&dev_priv->mm.unbound_list,
-                                              typeof(*obj), global_list);
-                       list_move_tail(&obj->global_list, &still_in_list);
-
-                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                               continue;
-
-                       drm_gem_object_reference(&obj->base);
-
-                       if (i915_gem_object_put_pages(obj) == 0)
-                               count += obj->base.size >> PAGE_SHIFT;
-
-                       drm_gem_object_unreference(&obj->base);
-               }
-               list_splice(&still_in_list, &dev_priv->mm.unbound_list);
-       }
-
-       if (flags & I915_SHRINK_BOUND) {
-               struct list_head still_in_list;
+               if ((flags & phase->bit) == 0)
+                       continue;
 
                INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+               while (count < target && !list_empty(phase->list)) {
                        struct drm_i915_gem_object *obj;
                        struct i915_vma *vma, *v;
 
-                       obj = list_first_entry(&dev_priv->mm.bound_list,
+                       obj = list_first_entry(phase->list,
                                               typeof(*obj), global_list);
                        list_move_tail(&obj->global_list, &still_in_list);
 
-                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                       if (flags & I915_SHRINK_PURGEABLE &&
+                           !i915_gem_object_is_purgeable(obj))
                                continue;
 
                        drm_gem_object_reference(&obj->base);
 
-                       list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                       /* For the unbound phase, this should be a no-op! */
+                       list_for_each_entry_safe(vma, v,
+                                                &obj->vma_list, vma_link)
                                if (i915_vma_unbind(vma))
                                        break;
 
@@ -2017,7 +2035,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
                        drm_gem_object_unreference(&obj->base);
                }
-               list_splice(&still_in_list, &dev_priv->mm.bound_list);
+               list_splice(&still_in_list, phase->list);
        }
 
        return count;
@@ -2811,6 +2829,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        u32 seqno = 0;
        int ret = 0;
 
+       if (args->flags != 0)
+               return -EINVAL;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
@@ -2846,8 +2867,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
 
-       return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
-                           file->driver_priv);
+       return __i915_wait_seqno(ring, seqno, reset_counter, true,
+                                &args->timeout_ns, file->driver_priv);
 
 out:
        drm_gem_object_unreference(&obj->base);
@@ -3166,6 +3187,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
             obj->stride, obj->tiling_mode);
 
        switch (INTEL_INFO(dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
@@ -3384,46 +3406,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
        return true;
 }
 
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
-       int err = 0;
-
-       list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
-               if (obj->gtt_space == NULL) {
-                       printk(KERN_ERR "object found on GTT list with no space reserved\n");
-                       err++;
-                       continue;
-               }
-
-               if (obj->cache_level != obj->gtt_space->color) {
-                       printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-                              i915_gem_obj_ggtt_offset(obj),
-                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-                              obj->cache_level,
-                              obj->gtt_space->color);
-                       err++;
-                       continue;
-               }
-
-               if (!i915_gem_valid_gtt_space(dev,
-                                             obj->gtt_space,
-                                             obj->cache_level)) {
-                       printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-                              i915_gem_obj_ggtt_offset(obj),
-                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-                              obj->cache_level);
-                       err++;
-                       continue;
-               }
-       }
-
-       WARN_ON(err);
-#endif
-}
-
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -3514,25 +3496,10 @@ search_free:
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &vm->inactive_list);
 
-       if (i915_is_ggtt(vm)) {
-               bool mappable, fenceable;
-
-               fenceable = (vma->node.size == fence_size &&
-                            (vma->node.start & (fence_alignment - 1)) == 0);
-
-               mappable = (vma->node.start + obj->base.size <=
-                           dev_priv->gtt.mappable_end);
-
-               obj->map_and_fenceable = mappable && fenceable;
-       }
-
-       WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-
        trace_i915_vma_bind(vma, flags);
        vma->bind_vma(vma, obj->cache_level,
-                     flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+                     flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
 
-       i915_gem_verify_gtt(dev);
        return vma;
 
 err_remove_node:
@@ -3739,7 +3706,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                list_for_each_entry(vma, &obj->vma_list, vma_link)
                        if (drm_mm_node_allocated(&vma->node))
                                vma->bind_vma(vma, cache_level,
-                                             obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+                                               vma->bound & GLOBAL_BIND);
        }
 
        list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3769,7 +3736,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                                    old_write_domain);
        }
 
-       i915_gem_verify_gtt(dev);
        return 0;
 }
 
@@ -4067,7 +4033,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (seqno == 0)
                return 0;
 
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+       ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -4101,6 +4067,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
+       unsigned bound;
        int ret;
 
        if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
@@ -4109,6 +4076,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
        if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
                return -EINVAL;
 
+       if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+               return -EINVAL;
+
        vma = i915_gem_obj_to_vma(obj, vm);
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -4130,15 +4100,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                }
        }
 
+       bound = vma ? vma->bound : 0;
        if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
                vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
        }
 
-       if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+       if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
                vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
+       if ((bound ^ vma->bound) & GLOBAL_BIND) {
+               bool mappable, fenceable;
+               u32 fence_size, fence_alignment;
+
+               fence_size = i915_gem_get_gtt_size(obj->base.dev,
+                                                  obj->base.size,
+                                                  obj->tiling_mode);
+               fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+                                                            obj->base.size,
+                                                            obj->tiling_mode,
+                                                            true);
+
+               fenceable = (vma->node.size == fence_size &&
+                            (vma->node.start & (fence_alignment - 1)) == 0);
+
+               mappable = (vma->node.start + obj->base.size <=
+                           dev_priv->gtt.mappable_end);
+
+               obj->map_and_fenceable = mappable && fenceable;
+       }
+
+       WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+
        vma->pin_count++;
        if (flags & PIN_MAPPABLE)
                obj->pin_mappable |= true;
@@ -5119,6 +5113,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        return ret;
 }
 
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * @old: current GEM buffer for the frontbuffer slots
+ * @new: new GEM buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits)
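[In terms of the bits involved, the update the kerneldoc describes boils down
to roughly the following; a simplified sketch, the real body also carries
WARN_ONs validating the current owner of each slot:

    if (old)
            old->frontbuffer_bits &= ~frontbuffer_bits;
    if (new)
            new->frontbuffer_bits |= frontbuffer_bits;
]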
@@ -5302,7 +5305,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        unsigned long timeout = msecs_to_jiffies(5000) + 1;
-       unsigned long pinned, bound, unbound, freed;
+       unsigned long pinned, bound, unbound, freed_pages;
        bool was_interruptible;
        bool unlock;
 
@@ -5319,7 +5322,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
 
-       freed = i915_gem_shrink_all(dev_priv);
+       freed_pages = i915_gem_shrink_all(dev_priv);
 
        dev_priv->mm.interruptible = was_interruptible;
 
@@ -5350,14 +5353,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
 
-       pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-               freed, pinned);
+       if (freed_pages || unbound || bound)
+               pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+                       freed_pages << PAGE_SHIFT, pinned);
        if (unbound || bound)
                pr_err("%lu and %lu bytes still available in the "
                       "bound and unbound GPU page lists.\n",
                       bound, unbound);
 
-       *(unsigned long *)ptr += freed;
+       *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
 }