Merge commit '9e9a928eed8796a0a1aaed7e0b676db86ba84594' into drm-next
author Dave Airlie <airlied@redhat.com>
Thu, 5 Jun 2014 10:28:59 +0000 (20:28 +1000)
committer Dave Airlie <airlied@redhat.com>
Thu, 5 Jun 2014 10:28:59 +0000 (20:28 +1000)
Merge drm-fixes into drm-next.

Both i915 and radeon need this done for later patches.

Conflicts:
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c

35 files changed:
MAINTAINERS
arch/x86/kernel/early-quirks.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/radeon/sid.h
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/imx-tve.c
include/drm/i915_pciids.h

diff --cc MAINTAINERS
Simple merge
Simple merge
diff --cc drivers/gpu/drm/drm_crtc_helper.c
@@@ -88,8 -89,13 +89,15 @@@ bool drm_helper_encoder_in_use(struct d
        struct drm_connector *connector;
        struct drm_device *dev = encoder->dev;
  
-       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       /*
+        * We can expect this mutex to be locked if we are not panicking.
+        * Locking is currently fubar in the panic handler.
+        */
 -      if (!oops_in_progress)
++      if (!oops_in_progress) {
+               WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
++              WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
++      }
        list_for_each_entry(connector, &dev->mode_config.connector_list, head)
                if (connector->encoder == encoder)
                        return true;
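
Note on the resolution above: both parents' lock assertions survive, but only outside a panic, as the in-hunk comment explains. A minimal sketch of the pattern the resolution converges on — the helper name is hypothetical and not part of this merge:

    /* Hypothetical helper, for illustration only: assert that the
     * modeset locks are held, but stay silent while an oops is in
     * progress, where locking state is unreliable. */
    static void assert_modeset_locked(struct drm_device *dev)
    {
            if (oops_in_progress)
                    return;
            WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
            WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
    }
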
Simple merge
diff --cc drivers/gpu/drm/i915/i915_drv.h
@@@ -1712,21 -1754,9 +1697,21 @@@ struct drm_i915_gem_object 
        struct drm_file *pin_filp;
  
        /** for phy allocated objects */
-       struct drm_i915_gem_phys_object *phys_obj;
+       drm_dma_handle_t *phys_handle;
 -};
  
 +      union {
 +              struct i915_gem_userptr {
 +                      uintptr_t ptr;
 +                      unsigned read_only :1;
 +                      unsigned workers :4;
 +#define I915_GEM_USERPTR_MAX_WORKERS 15
 +
 +                      struct mm_struct *mm;
 +                      struct i915_mmu_object *mn;
 +                      struct work_struct *work;
 +              } userptr;
 +      };
 +};
  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
  
  /**
@@@ -2295,15 -2319,10 +2285,10 @@@ i915_gem_object_set_to_cpu_domain(struc
  int __must_check
  i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
 -                                   struct intel_ring_buffer *pipelined);
 +                                   struct intel_engine_cs *pipelined);
  void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
- int i915_gem_attach_phys_object(struct drm_device *dev,
-                               struct drm_i915_gem_object *obj,
-                               int id,
+ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                                int align);
- void i915_gem_detach_phys_object(struct drm_device *dev,
-                                struct drm_i915_gem_object *obj);
- void i915_gem_free_all_phys_object(struct drm_device *dev);
  int i915_gem_open(struct drm_device *dev, struct drm_file *file);
  void i915_gem_release(struct drm_device *dev, struct drm_file *file);
  
diff --cc drivers/gpu/drm/i915/i915_gem.c
@@@ -44,14 -43,7 +44,9 @@@ static void i915_gem_object_flush_cpu_w
  static __must_check int
  i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly);
 +static void
 +i915_gem_object_retire(struct drm_i915_gem_object *obj);
  
- static int i915_gem_phys_pwrite(struct drm_device *dev,
-                               struct drm_i915_gem_object *obj,
-                               struct drm_i915_gem_pwrite *args,
-                               struct drm_file *file);
  static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
  static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
@@@ -3896,9 -3974,8 +4037,9 @@@ in
  i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    struct i915_address_space *vm,
                    uint32_t alignment,
-                   unsigned flags)
+                   uint64_t flags)
  {
 +      struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
        int ret;
  
@@@ -4783,199 -4779,12 +4921,15 @@@ i915_gem_load(struct drm_device *dev
  
        dev_priv->mm.interruptible = true;
  
 -      dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
 -      dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
 -      dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
 -      register_shrinker(&dev_priv->mm.inactive_shrinker);
 +      dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
 +      dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
 +      dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
 +      register_shrinker(&dev_priv->mm.shrinker);
 +
 +      dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 +      register_oom_notifier(&dev_priv->mm.oom_notifier);
  }
  
- /*
-  * Create a physically contiguous memory object for this object
-  * e.g. for cursor + overlay regs
-  */
- static int i915_gem_init_phys_object(struct drm_device *dev,
-                                    int id, int size, int align)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_phys_object *phys_obj;
-       int ret;
-       if (dev_priv->mm.phys_objs[id - 1] || !size)
-               return 0;
-       phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-       if (!phys_obj)
-               return -ENOMEM;
-       phys_obj->id = id;
-       phys_obj->handle = drm_pci_alloc(dev, size, align);
-       if (!phys_obj->handle) {
-               ret = -ENOMEM;
-               goto kfree_obj;
-       }
- #ifdef CONFIG_X86
-       set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
- #endif
-       dev_priv->mm.phys_objs[id - 1] = phys_obj;
-       return 0;
- kfree_obj:
-       kfree(phys_obj);
-       return ret;
- }
- static void i915_gem_free_phys_object(struct drm_device *dev, int id)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_phys_object *phys_obj;
-       if (!dev_priv->mm.phys_objs[id - 1])
-               return;
-       phys_obj = dev_priv->mm.phys_objs[id - 1];
-       if (phys_obj->cur_obj) {
-               i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-       }
- #ifdef CONFIG_X86
-       set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
- #endif
-       drm_pci_free(dev, phys_obj->handle);
-       kfree(phys_obj);
-       dev_priv->mm.phys_objs[id - 1] = NULL;
- }
- void i915_gem_free_all_phys_object(struct drm_device *dev)
- {
-       int i;
-       for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-               i915_gem_free_phys_object(dev, i);
- }
- void i915_gem_detach_phys_object(struct drm_device *dev,
-                                struct drm_i915_gem_object *obj)
- {
-       struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-       char *vaddr;
-       int i;
-       int page_count;
-       if (!obj->phys_obj)
-               return;
-       vaddr = obj->phys_obj->handle->vaddr;
-       page_count = obj->base.size / PAGE_SIZE;
-       for (i = 0; i < page_count; i++) {
-               struct page *page = shmem_read_mapping_page(mapping, i);
-               if (!IS_ERR(page)) {
-                       char *dst = kmap_atomic(page);
-                       memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-                       kunmap_atomic(dst);
-                       drm_clflush_pages(&page, 1);
-                       set_page_dirty(page);
-                       mark_page_accessed(page);
-                       page_cache_release(page);
-               }
-       }
-       i915_gem_chipset_flush(dev);
-       obj->phys_obj->cur_obj = NULL;
-       obj->phys_obj = NULL;
- }
- int
- i915_gem_attach_phys_object(struct drm_device *dev,
-                           struct drm_i915_gem_object *obj,
-                           int id,
-                           int align)
- {
-       struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
-       int page_count;
-       int i;
-       if (id > I915_MAX_PHYS_OBJECT)
-               return -EINVAL;
-       if (obj->phys_obj) {
-               if (obj->phys_obj->id == id)
-                       return 0;
-               i915_gem_detach_phys_object(dev, obj);
-       }
-       /* create a new object */
-       if (!dev_priv->mm.phys_objs[id - 1]) {
-               ret = i915_gem_init_phys_object(dev, id,
-                                               obj->base.size, align);
-               if (ret) {
-                       DRM_ERROR("failed to init phys object %d size: %zu\n",
-                                 id, obj->base.size);
-                       return ret;
-               }
-       }
-       /* bind to the object */
-       obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-       obj->phys_obj->cur_obj = obj;
-       page_count = obj->base.size / PAGE_SIZE;
-       for (i = 0; i < page_count; i++) {
-               struct page *page;
-               char *dst, *src;
-               page = shmem_read_mapping_page(mapping, i);
-               if (IS_ERR(page))
-                       return PTR_ERR(page);
-               src = kmap_atomic(page);
-               dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-               memcpy(dst, src, PAGE_SIZE);
-               kunmap_atomic(src);
-               mark_page_accessed(page);
-               page_cache_release(page);
-       }
-       return 0;
- }
- static int
- i915_gem_phys_pwrite(struct drm_device *dev,
-                    struct drm_i915_gem_object *obj,
-                    struct drm_i915_gem_pwrite *args,
-                    struct drm_file *file_priv)
- {
-       void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-       char __user *user_data = to_user_ptr(args->data_ptr);
-       if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-               unsigned long unwritten;
-               /* The physical object once assigned is fixed for the lifetime
-                * of the obj, so we can safely drop the lock and continue
-                * to access vaddr.
-                */
-               mutex_unlock(&dev->struct_mutex);
-               unwritten = copy_from_user(vaddr, user_data, args->size);
-               mutex_lock(&dev->struct_mutex);
-               if (unwritten)
-                       return -EFAULT;
-       }
-       i915_gem_chipset_flush(dev);
-       return 0;
- }
  void i915_gem_release(struct drm_device *dev, struct drm_file *file)
  {
        struct drm_i915_file_private *file_priv = file->driver_priv;
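
Note on the large removal above: the per-id phys-object machinery (init/free/attach/detach plus i915_gem_phys_pwrite) is gone, replaced by the single i915_gem_object_attach_phys() entry point declared in the i915_drv.h hunk earlier. A hedged sketch of the caller side, assuming cursor-style usage with abbreviated error handling:

    /* Sketch, not verbatim driver code: attach a contiguous DMA
     * backing store and hand its bus address to the hardware. */
    ret = i915_gem_object_attach_phys(obj, align);
    if (ret)
            return ret;
    addr = obj->phys_handle->busaddr; /* drm_dma_handle_t: vaddr/busaddr/size */
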
diff --cc drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@@ -595,8 -597,38 +600,38 @@@ i915_gem_execbuffer_reserve_vma(struct 
        return 0;
  }
  
+ static bool
+ eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+ {
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+       struct drm_i915_gem_object *obj = vma->obj;
+       bool need_fence, need_mappable;
+       need_fence =
+               has_fenced_gpu_access &&
+               entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+               obj->tiling_mode != I915_TILING_NONE;
+       need_mappable = need_fence || need_reloc_mappable(vma);
+       WARN_ON((need_mappable || need_fence) &&
+              !i915_is_ggtt(vma->vm));
+       if (entry->alignment &&
+           vma->node.start & (entry->alignment - 1))
+               return true;
+       if (need_mappable && !obj->map_and_fenceable)
+               return true;
+       if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+           vma->node.start < BATCH_OFFSET_BIAS)
+               return true;
+       return false;
+ }
  static int
 -i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 +i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                            struct list_head *vmas,
                            bool *need_relocs)
  {
@@@ -1009,37 -1018,25 +1028,56 @@@ i915_reset_gen7_sol_offsets(struct drm_
        return 0;
  }
  
 +/**
 + * Find one BSD ring to dispatch the corresponding BSD command.
 + * The Ring ID is returned.
 + */
 +static int gen8_dispatch_bsd_ring(struct drm_device *dev,
 +                                struct drm_file *file)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_file_private *file_priv = file->driver_priv;
 +
 +      /* Check whether the file_priv is using one ring */
 +      if (file_priv->bsd_ring)
 +              return file_priv->bsd_ring->id;
 +      else {
 +              /* If no, use the ping-pong mechanism to select one ring */
 +              int ring_id;
 +
 +              mutex_lock(&dev->struct_mutex);
 +              if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
 +                      ring_id = VCS;
 +                      dev_priv->mm.bsd_ring_dispatch_index = 1;
 +              } else {
 +                      ring_id = VCS2;
 +                      dev_priv->mm.bsd_ring_dispatch_index = 0;
 +              }
 +              file_priv->bsd_ring = &dev_priv->ring[ring_id];
 +              mutex_unlock(&dev->struct_mutex);
 +              return ring_id;
 +      }
 +}
 +
+ static struct drm_i915_gem_object *
+ eb_get_batch(struct eb_vmas *eb)
+ {
+       struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+       /*
+        * SNA is doing fancy tricks with compressing batch buffers, which leads
+        * to negative relocation deltas. Usually that works out ok since the
+        * relocate address is still positive, except when the batch is placed
+        * very low in the GTT. Ensure this doesn't happen.
+        *
+        * Note that actual hangs have only been observed on gen7, but for
+        * paranoia do it everywhere.
+        */
+       vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+       return vma->obj;
+ }
  static int
  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
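
Note on eb_get_batch() above: the SNA comment is the whole story — a negative relocation delta applied to a batch placed near the bottom of the GTT wraps around. A worked illustration, with made-up values, of the failure that __EXEC_OBJECT_NEEDS_BIAS (checked in eb_vma_misplaced() against BATCH_OFFSET_BIAS) prevents:

    uint32_t batch_start = 0x1000;              /* batch placed very low */
    int32_t  delta       = -0x2000;             /* SNA-style negative delta */
    uint32_t reloc       = batch_start + delta; /* wraps to 0xfffff000 */
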
diff --cc drivers/gpu/drm/i915/i915_gem_gtt.c
@@@ -49,13 -62,62 +63,9 @@@ static int sanitize_enable_ppgtt(struc
        }
  #endif
  
-       /* Full ppgtt disabled by default for now due to issues. */
-       if (full)
-               return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
-       else
-               return HAS_ALIASING_PPGTT(dev);
+       return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
  }
  
 -#define GEN6_PPGTT_PD_ENTRIES 512
 -#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
 -typedef uint64_t gen8_gtt_pte_t;
 -typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 -
 -/* PPGTT stuff */
 -#define GEN6_GTT_ADDR_ENCODE(addr)    ((addr) | (((addr) >> 28) & 0xff0))
 -#define HSW_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0x7f0))
 -
 -#define GEN6_PDE_VALID                        (1 << 0)
 -/* gen6+ has bit 11-4 for physical addr bit 39-32 */
 -#define GEN6_PDE_ADDR_ENCODE(addr)    GEN6_GTT_ADDR_ENCODE(addr)
 -
 -#define GEN6_PTE_VALID                        (1 << 0)
 -#define GEN6_PTE_UNCACHED             (1 << 1)
 -#define HSW_PTE_UNCACHED              (0)
 -#define GEN6_PTE_CACHE_LLC            (2 << 1)
 -#define GEN7_PTE_CACHE_L3_LLC         (3 << 1)
 -#define GEN6_PTE_ADDR_ENCODE(addr)    GEN6_GTT_ADDR_ENCODE(addr)
 -#define HSW_PTE_ADDR_ENCODE(addr)     HSW_GTT_ADDR_ENCODE(addr)
 -
 -/* Cacheability Control is a 4-bit value. The low three bits are stored in *
 - * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 - */
 -#define HSW_CACHEABILITY_CONTROL(bits)        ((((bits) & 0x7) << 1) | \
 -                                       (((bits) & 0x8) << (11 - 3)))
 -#define HSW_WB_LLC_AGE3                       HSW_CACHEABILITY_CONTROL(0x2)
 -#define HSW_WB_LLC_AGE0                       HSW_CACHEABILITY_CONTROL(0x3)
 -#define HSW_WB_ELLC_LLC_AGE0          HSW_CACHEABILITY_CONTROL(0xb)
 -#define HSW_WB_ELLC_LLC_AGE3          HSW_CACHEABILITY_CONTROL(0x8)
 -#define HSW_WT_ELLC_LLC_AGE0          HSW_CACHEABILITY_CONTROL(0x6)
 -#define HSW_WT_ELLC_LLC_AGE3          HSW_CACHEABILITY_CONTROL(0x7)
 -
 -#define GEN8_PTES_PER_PAGE            (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 -#define GEN8_PDES_PER_PAGE            (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
 -
 -/* GEN8 legacy style address is defined as a 3 level page table:
 - * 31:30 | 29:21 | 20:12 |  11:0
 - * PDPE  |  PDE  |  PTE  | offset
 - * The difference as compared to normal x86 3 level page table is the PDPEs are
 - * programmed via register.
 - */
 -#define GEN8_PDPE_SHIFT                       30
 -#define GEN8_PDPE_MASK                        0x3
 -#define GEN8_PDE_SHIFT                        21
 -#define GEN8_PDE_MASK                 0x1ff
 -#define GEN8_PTE_SHIFT                        12
 -#define GEN8_PTE_MASK                 0x1ff
 -
 -#define PPAT_UNCACHED_INDEX           (_PAGE_PWT | _PAGE_PCD)
 -#define PPAT_CACHED_PDE_INDEX         0 /* WB LLC */
 -#define PPAT_CACHED_INDEX             _PAGE_PAT /* WB LLCeLLC */
 -#define PPAT_DISPLAY_ELLC_INDEX               _PAGE_PCD /* WT eLLC */
  
  static void ppgtt_bind_vma(struct i915_vma *vma,
                           enum i915_cache_level cache_level,
@@@ -2048,10 -2043,14 +2060,18 @@@ int i915_gem_gtt_init(struct drm_devic
                 gtt->base.total >> 20);
        DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
 +#ifdef CONFIG_INTEL_IOMMU
 +      if (intel_iommu_gfx_mapped)
 +              DRM_INFO("VT-d active for gfx access\n");
 +#endif
+       /*
+        * i915.enable_ppgtt is read-only, so do an early pass to validate the
+        * user's requested state against the hardware/driver capabilities.  We
+        * do this now so that we can print out any log messages once rather
+        * than every time we check intel_enable_ppgtt().
+        */
+       i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+       DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
  
        return 0;
  }
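
Note on the macro block removed earlier in this file's diff: it documented the GEN8 three-level layout (31:30 PDPE | 29:21 PDE | 20:12 PTE | 11:0 offset). A hypothetical decode helper, purely illustrative, built from those shift/mask values:

    /* Hypothetical helper (not in the kernel): split a 32-bit GGTT
     * address into its GEN8 page-table indices. */
    static inline void gen8_decode(uint32_t addr, uint32_t *pdpe,
                                   uint32_t *pde, uint32_t *pte)
    {
            *pdpe = (addr >> 30) & 0x3;   /* GEN8_PDPE_SHIFT/MASK */
            *pde  = (addr >> 21) & 0x1ff; /* GEN8_PDE_SHIFT/MASK  */
            *pte  = (addr >> 12) & 0x1ff; /* GEN8_PTE_SHIFT/MASK  */
    }
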
Simple merge
diff --cc drivers/gpu/drm/i915/intel_display.c
@@@ -11994,9 -11560,11 +11989,11 @@@ static void intel_sanitize_encoder(stru
                if (encoder->base.crtc) {
                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
 -                                    drm_get_encoder_name(&encoder->base));
 +                                    encoder->base.name);
                        encoder->disable(encoder);
                }
+               encoder->base.crtc = NULL;
+               encoder->connectors_active = false;
  
                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
Simple merge
diff --cc drivers/gpu/drm/i915/intel_fbdev.c
@@@ -387,6 -387,15 +387,15 @@@ static bool intel_fb_initial_config(str
                                                          height);
                }
  
+               /* No preferred mode marked by the EDID? Are there any modes? */
+               if (!modes[i] && !list_empty(&connector->modes)) {
+                       DRM_DEBUG_KMS("using first mode listed on connector %s\n",
 -                                    drm_get_connector_name(connector));
++                                    connector->name);
+                       modes[i] = list_first_entry(&connector->modes,
+                                                   struct drm_display_mode,
+                                                   head);
+               }
                /* last resort: use current mode */
                if (!modes[i]) {
                        /*
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc drivers/gpu/drm/radeon/radeon_display.c
@@@ -289,9 -284,14 +289,13 @@@ void radeon_crtc_handle_vblank(struct r
        u32 update_pending;
        int vpos, hpos;
  
+       /* can happen during initialization */
+       if (radeon_crtc == NULL)
+               return;
        spin_lock_irqsave(&rdev->ddev->event_lock, flags);
 -      work = radeon_crtc->unpin_work;
 -      if (work == NULL ||
 -          (work->fence && !radeon_fence_signaled(work->fence))) {
 +      work = radeon_crtc->flip_work;
 +      if (work == NULL) {
                spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
                return;
        }
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge