Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm...
author Dave Airlie <airlied@redhat.com>
Wed, 1 Jun 2016 21:58:36 +0000 (07:58 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 1 Jun 2016 21:58:36 +0000 (07:58 +1000)
drm-intel-next-2016-05-22:
- cmd-parser support for direct reg->reg loads (Ken Graunke)
- better handle DP++ smart dongles (Ville)
- bxt guc fw loading support (Nick Hoath)
- remove a bunch of struct typedefs from dpll code (Ander)
- tons of small work all over to avoid casting between drm_device and the i915
  dev struct (Tvrtko&Chris)
- untangle request retiring from other operations, also fixes reset stat corner
  cases (Chris)
- skl atomic watermark support from Matt Roper, yay!
- various wm handling bugfixes from Ville
- big pile of cdclk rework for bxt/skl (Ville)
- CABC (Content Adaptive Brightness Control) for dsi panels (Jani&Deepak M)
- nonblocking atomic commits for plane-only updates (Maarten Lankhorst)
- bunch of PSR fixes&improvements
- untangle our map/pin/sg_iter code a bit (Dave Gordon)
drm-intel-next-2016-05-08:
- refactor stolen quirks to share code between early quirks and i915 (Joonas)
- refactor gem BO/vma functions (Tvrtko&Dave)
- backlight over DPCD support (Yetunde Abedisi)
- more dsi panel sequence support (Jani)
- lots of refactoring around handling iomaps, vma, ring access and related
  topics culminating in removing the duplicated request tracking in the execlist
  code (Chris & Tvrtko); includes a small patch for core iomapping code
- hw state readout for bxt dsi (Ramalingam C)
- cdclk cleanups (Ville)
- dedupe chv pll code a bit (Ander)
- enable semaphores on gen8+ for legacy submission, to be able to have a direct
  comparison against execlist on the same platform (Chris). Not meant to be used
  for anything else but performance tuning
- lvds border bit hw state checker fix (Jani)
- rpm vs. shrinker/oom-notifier fixes (Praveen Paneri)
- l3 tuning (Imre)
- revert mst dp audio, it's totally non-functional and crash-y (Lyude)
- first official dmc for kbl (Rodrigo)
- and tons of small things all over as usual

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (194 commits)
  drm/i915: Revert async unpin and nonblocking atomic commit
  drm/i915: Update DRIVER_DATE to 20160522
  drm/i915: Inline sg_next() for the optimised SGL iterator
  drm/i915: Introduce & use new lightweight SGL iterators
  drm/i915: optimise i915_gem_object_map() for small objects
  drm/i915: refactor i915_gem_object_pin_map()
  drm/i915/psr: Implement PSR2 w/a for gen9
  drm/i915/psr: Use ->get_aux_send_ctl functions
  drm/i915/psr: Order DP aux transactions correctly
  drm/i915/psr: Make idle_frames sensible again
  drm/i915/psr: Try to program link training times correctly
  drm/i915/userptr: Convert to drm_i915_private
  drm/i915: Allow nonblocking update of pageflips.
  drm/i915: Check for unpin correctness.
  Reapply "drm/i915: Avoid stalling on pending flips for legacy cursor updates"
  drm/i915: Make unpin async.
  drm/i915: Prepare connectors for nonblocking checks.
  drm/i915: Pass atomic states to fbc update functions.
  drm/i915: Remove reset_counter from intel_crtc.
  drm/i915: Remove queue_flip pointer.
  ...

12 files changed:
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
include/drm/drmP.h

  #include <linux/vgaarb.h>
  #include <linux/export.h>
  
 -/* Access macro for slots in vblank timestamp ringbuffer. */
 -#define vblanktimestamp(dev, pipe, count) \
 -      ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
 -
  /* Retry timestamp calculation up to 3 times to satisfy
   * drm_timestamp_precision before giving up.
   */
@@@ -78,15 -82,29 +78,15 @@@ static void store_vblank(struct drm_dev
                         struct timeval *t_vblank, u32 last)
  {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 -      u32 tslot;
  
        assert_spin_locked(&dev->vblank_time_lock);
  
        vblank->last = last;
  
 -      /* All writers hold the spinlock, but readers are serialized by
 -       * the latching of vblank->count below.
 -       */
 -      tslot = vblank->count + vblank_count_inc;
 -      vblanktimestamp(dev, pipe, tslot) = *t_vblank;
 -
 -      /*
 -       * vblank timestamp updates are protected on the write side with
 -       * vblank_time_lock, but on the read side done locklessly using a
 -       * sequence-lock on the vblank counter. Ensure correct ordering using
 -       * memory barrriers. We need the barrier both before and also after the
 -       * counter update to synchronize with the next timestamp write.
 -       * The read-side barriers for this are in drm_vblank_count_and_time.
 -       */
 -      smp_wmb();
 +      write_seqlock(&vblank->seqlock);
 +      vblank->time = *t_vblank;
        vblank->count += vblank_count_inc;
 -      smp_wmb();
 +      write_sequnlock(&vblank->seqlock);
  }
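
The hand-rolled smp_wmb()-based sequence counter above is replaced with a
real seqlock_t. A minimal sketch of the resulting write/read pairing,
assuming the <linux/seqlock.h> API (illustrative names, not lines from this
patch); the matching read side lands in drm_vblank_count_and_time() below:

	/* Writer: callers already hold vblank_time_lock; the seqlock
	 * publishes (time, count) to lockless readers as one unit. */
	static void sample_store(struct drm_vblank_crtc *vblank,
				 const struct timeval *t, u32 inc)
	{
		write_seqlock(&vblank->seqlock);
		vblank->time = *t;
		vblank->count += inc;
		write_sequnlock(&vblank->seqlock);
	}

	/* Reader: retry until no writer raced with us, guaranteeing a
	 * consistent (count, time) snapshot without taking a lock. */
	static u32 sample_load(struct drm_vblank_crtc *vblank,
			       struct timeval *t)
	{
		unsigned int seq;
		u32 count;

		do {
			seq = read_seqbegin(&vblank->seqlock);
			count = vblank->count;
			*t = vblank->time;
		} while (read_seqretry(&vblank->seqlock, seq));

		return count;
	}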
  
  /**
@@@ -187,7 -205,7 +187,7 @@@ static void drm_update_vblank_count(str
                const struct timeval *t_old;
                u64 diff_ns;
  
 -              t_old = &vblanktimestamp(dev, pipe, vblank->count);
 +              t_old = &vblank->time;
                diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
  
                /*
                diff = 1;
        }
  
 -      /*
 -       * FIMXE: Need to replace this hack with proper seqlocks.
 -       *
 -       * Restrict the bump of the software vblank counter to a safe maximum
 -       * value of +1 whenever there is the possibility that concurrent readers
 -       * of vblank timestamps could be active at the moment, as the current
 -       * implementation of the timestamp caching and updating is not safe
 -       * against concurrent readers for calls to store_vblank() with a bump
 -       * of anything but +1. A bump != 1 would very likely return corrupted
 -       * timestamps to userspace, because the same slot in the cache could
 -       * be concurrently written by store_vblank() and read by one of those
 -       * readers without the read-retry logic detecting the collision.
 -       *
 -       * Concurrent readers can exist when we are called from the
 -       * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
 -       * irq callers. However, all those calls to us are happening with the
 -       * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
 -       * can't increase while we are executing. Therefore a zero refcount at
 -       * this point is safe for arbitrary counter bumps if we are called
 -       * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
 -       * we must also accept a refcount of 1, as whenever we are called from
 -       * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
 -       * we must let that one pass through in order to not lose vblank counts
 -       * during vblank irq off - which would completely defeat the whole
 -       * point of this routine.
 -       *
 -       * Whenever we are called from vblank irq, we have to assume concurrent
 -       * readers exist or can show up any time during our execution, even if
 -       * the refcount is currently zero, as vblank irqs are usually only
 -       * enabled due to the presence of readers, and because when we are called
 -       * from vblank irq we can't hold the vbl_lock to protect us from sudden
 -       * bumps in vblank refcount. Therefore also restrict bumps to +1 when
 -       * called from vblank irq.
 -       */
 -      if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
 -          (flags & DRM_CALLED_FROM_VBLIRQ))) {
 -              DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
 -                            "refcount %u, vblirq %u\n", pipe, diff,
 -                            atomic_read(&vblank->refcount),
 -                            (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
 -              diff = 1;
 -      }
 -
        DRM_DEBUG_VBL("updating vblank count on crtc %u:"
                      " current=%u, diff=%u, hw=%u hw_last=%u\n",
                      pipe, vblank->count, diff, cur_vblank, vblank->last);
        store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
  }
  
+ /**
+  * drm_accurate_vblank_count - retrieve the master vblank counter
+  * @crtc: which counter to retrieve
+  *
+  * This function is similar to drm_crtc_vblank_count() but
+  * interpolates to handle a race with vblank irqs.
+  *
+  * This is mostly useful for hardware that can obtain the scanout
+  * position, but doesn't have a frame counter.
+  */
+ u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
+ {
+       struct drm_device *dev = crtc->dev;
+       unsigned int pipe = drm_crtc_index(crtc);
+       u32 vblank;
+       unsigned long flags;
+       WARN(!dev->driver->get_vblank_timestamp,
+            "This function requires support for accurate vblank timestamps.");
+       spin_lock_irqsave(&dev->vblank_time_lock, flags);
+       drm_update_vblank_count(dev, pipe, 0);
+       vblank = drm_vblank_count(dev, pipe);
+       spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
+       return vblank;
+ }
+ EXPORT_SYMBOL(drm_accurate_vblank_count);
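
A hypothetical caller, on hardware that knows its scanout position but has
no frame counter, might stamp a completed flip like this (the surrounding
event plumbing is illustrative, not part of this patch):

	static void example_finish_flip(struct drm_crtc *crtc,
					struct drm_pending_vblank_event *e)
	{
		/* Interpolation keeps the sequence accurate even when a
		 * vblank irq races with this call. */
		e->event.sequence = drm_accurate_vblank_count(crtc);
		/* ... then hand e to drm_crtc_send_vblank_event() ... */
	}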
  /*
   * Disable vblank irq's on crtc, make sure that last vblank count
   * of hardware and corresponding consistent software vblank counter
@@@ -287,6 -379,9 +318,6 @@@ static void vblank_disable_fn(unsigned 
        unsigned int pipe = vblank->pipe;
        unsigned long irqflags;
  
 -      if (!dev->vblank_disable_allowed)
 -              return;
 -
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
                DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
@@@ -356,7 -451,6 +387,7 @@@ int drm_vblank_init(struct drm_device *
                init_waitqueue_head(&vblank->queue);
                setup_timer(&vblank->disable_timer, vblank_disable_fn,
                            (unsigned long)vblank);
 +              seqlock_init(&vblank->seqlock);
        }
  
        DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
                         "get_vblank_timestamp == NULL\n");
        }
  
 -      dev->vblank_disable_allowed = false;
 -
        return 0;
  
  err:
@@@ -926,19 -1022,25 +957,19 @@@ u32 drm_vblank_count_and_time(struct dr
                              struct timeval *vblanktime)
  {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 -      int count = DRM_TIMESTAMP_MAXRETRIES;
 -      u32 cur_vblank;
 +      u32 vblank_count;
 +      unsigned int seq;
  
        if (WARN_ON(pipe >= dev->num_crtcs))
                return 0;
  
 -      /*
 -       * Vblank timestamps are read lockless. To ensure consistency the vblank
 -       * counter is rechecked and ordering is ensured using memory barriers.
 -       * This works like a seqlock. The write-side barriers are in store_vblank.
 -       */
        do {
 -              cur_vblank = vblank->count;
 -              smp_rmb();
 -              *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
 -              smp_rmb();
 -      } while (cur_vblank != vblank->count && --count > 0);
 +              seq = read_seqbegin(&vblank->seqlock);
 +              vblank_count = vblank->count;
 +              *vblanktime = vblank->time;
 +      } while (read_seqretry(&vblank->seqlock, seq));
  
 -      return cur_vblank;
 +      return vblank_count;
  }
  EXPORT_SYMBOL(drm_vblank_count_and_time);
  
@@@ -1514,6 -1616,7 +1545,6 @@@ void drm_vblank_post_modeset(struct drm
  
        if (vblank->inmodeset) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
 -              dev->vblank_disable_allowed = true;
                drm_reset_vblank_timestamp(dev, pipe);
                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
  
@@@ -186,7 -186,7 +186,7 @@@ static int i915_getparam(struct drm_dev
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
-               value = i915_semaphore_is_enabled(dev);
+               value = i915_semaphore_is_enabled(dev_priv);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                value = 1;
                break;
        case I915_PARAM_CMD_PARSER_VERSION:
-               value = i915_cmd_parser_get_version();
+               value = i915_cmd_parser_get_version(dev_priv);
                break;
        case I915_PARAM_HAS_COHERENT_PHYS_GTT:
                value = 1;
                        return -ENODEV;
                break;
        case I915_PARAM_HAS_GPU_RESET:
-               value = i915.enable_hangcheck &&
-                       intel_has_gpu_reset(dev);
+               value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
                break;
        case I915_PARAM_HAS_RESOURCE_STREAMER:
                value = HAS_RESOURCE_STREAMER(dev);
@@@ -425,6 -424,43 +424,43 @@@ static const struct vga_switcheroo_clie
        .can_switch = i915_switcheroo_can_switch,
  };
  
+ static void i915_gem_fini(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       /*
+        * Neither the BIOS, ourselves, nor any other kernel
+        * expects the system to be in execlists mode on startup,
+        * so we need to reset the GPU back to legacy mode. And the only
+        * known way to disable logical contexts is through a GPU reset.
+        *
+        * So in order to leave the system in a known default configuration,
+        * always reset the GPU upon unload. Afterwards we then clean up the
+        * GEM state tracking, flushing off the requests and leaving the
+        * system in a known idle state.
+        *
+        * Note that it is of the utmost importance that the GPU is idle and
+        * all stray writes are flushed *before* we dismantle the backing
+        * storage for the pinned objects.
+        *
+        * However, since we are uncertain that resetting the GPU on older
+        * machines is a good idea, we don't - just in case it leaves the
+        * machine in an unusable condition.
+        */
+       if (HAS_HW_CONTEXTS(dev)) {
+               int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+               WARN_ON(reset && reset != -ENODEV);
+       }
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_reset(dev);
+       i915_gem_cleanup_engines(dev);
+       i915_gem_context_fini(dev);
+       mutex_unlock(&dev->struct_mutex);
+       WARN_ON(!list_empty(&to_i915(dev)->context_list));
+ }
  static int i915_load_modeset_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        if (ret)
                goto cleanup_vga_client;
  
+       /* must happen before intel_power_domains_init_hw() on VLV/CHV */
+       intel_update_rawclk(dev_priv);
        intel_power_domains_init_hw(dev_priv, false);
  
        intel_csr_ucode_init(dev_priv);
  
        intel_modeset_gem_init(dev);
  
 -      /* Always safe in the mode setting case. */
 -      /* FIXME: do pre/post-mode set stuff in core KMS code */
 -      dev->vblank_disable_allowed = true;
        if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;
  
        return 0;
  
  cleanup_gem:
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_engines(dev);
-       i915_gem_context_fini(dev);
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_fini(dev);
  cleanup_irq:
        intel_guc_ucode_fini(dev);
        drm_irq_uninstall(dev);
@@@ -850,7 -889,7 +886,7 @@@ static void intel_device_info_runtime_i
                DRM_INFO("Display disabled (module parameter)\n");
                info->num_pipes = 0;
        } else if (info->num_pipes > 0 &&
-                  (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
+                  (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
                   HAS_PCH_SPLIT(dev)) {
                u32 fuse_strap = I915_READ(FUSE_STRAP);
                u32 sfuse_strap = I915_READ(SFUSE_STRAP);
                        DRM_INFO("PipeC fused off\n");
                        info->num_pipes -= 1;
                }
-       } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+       } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
                u32 dfsm = I915_READ(SKL_DFSM);
                u8 disabled_mask = 0;
                bool invalid;
        else if (INTEL_INFO(dev)->gen >= 9)
                gen9_sseu_info_init(dev);
  
-       /* Snooping is broken on BXT A stepping. */
        info->has_snoop = !info->has_llc;
-       info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
+       /* Snooping is broken on BXT A stepping. */
+       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+               info->has_snoop = false;
  
        DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
        DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
                         info->has_subslice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has EU power gating: %s\n",
                         info->has_eu_pg ? "y" : "n");
+       i915.enable_execlists =
+               intel_sanitize_enable_execlists(dev_priv,
+                                               i915.enable_execlists);
+       /*
+        * i915.enable_ppgtt is read-only, so do an early pass to validate the
+        * user's requested state against the hardware/driver capabilities.  We
+        * do this now so that we can print out any log messages once rather
+        * than every time we check intel_enable_ppgtt().
+        */
+       i915.enable_ppgtt =
+               intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
+       DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
  }
  
  static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@@ -1020,6 -1075,9 +1072,9 @@@ static int i915_driver_init_early(struc
        memcpy(device_info, info, sizeof(dev_priv->info));
        device_info->device_id = dev->pdev->device;
  
+       BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+       device_info->gen_mask = BIT(device_info->gen - 1);
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
        mutex_init(&dev_priv->backlight_lock);
@@@ -1137,7 -1195,7 +1192,7 @@@ static int i915_driver_init_mmio(struc
        if (ret < 0)
                goto put_bridge;
  
-       intel_uncore_init(dev);
+       intel_uncore_init(dev_priv);
  
        return 0;
  
@@@ -1155,7 -1213,7 +1210,7 @@@ static void i915_driver_cleanup_mmio(st
  {
        struct drm_device *dev = dev_priv->dev;
  
-       intel_uncore_fini(dev);
+       intel_uncore_fini(dev_priv);
        i915_mmio_cleanup(dev);
        pci_dev_put(dev_priv->bridge_dev);
  }
@@@ -1206,8 -1264,15 +1261,15 @@@ static int i915_driver_init_hw(struct d
        pci_set_master(dev->pdev);
  
        /* overlay on gen2 is broken and can't address above 1G */
-       if (IS_GEN2(dev))
-               dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+       if (IS_GEN2(dev)) {
+               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+               if (ret) {
+                       DRM_ERROR("failed to set DMA mask\n");
+                       goto out_ggtt;
+               }
+       }
  
        /* 965GM sometimes incorrectly writes to hardware status page (HWS)
         * using 32bit addressing, overwriting memory if HWS is located
         * behaviour if any general state is accessed within a page above 4GB,
         * which also needs to be handled carefully.
         */
-       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
-               dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+               if (ret) {
+                       DRM_ERROR("failed to set DMA mask\n");
+                       goto out_ggtt;
+               }
+       }
  
        aperture_size = ggtt->mappable_end;
  
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);
  
-       intel_uncore_sanitize(dev);
+       intel_uncore_sanitize(dev_priv);
  
        intel_opregion_setup(dev);
  
@@@ -1300,7 -1372,7 +1369,7 @@@ static void i915_driver_register(struc
         * Notify a valid surface after modesetting,
         * when running inside a VM.
         */
-       if (intel_vgpu_active(dev))
+       if (intel_vgpu_active(dev_priv))
                I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
  
        i915_setup_sysfs(dev);
@@@ -1459,10 -1531,7 +1528,7 @@@ int i915_driver_unload(struct drm_devic
        flush_workqueue(dev_priv->wq);
  
        intel_guc_ucode_fini(dev);
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_engines(dev);
-       i915_gem_context_fini(dev);
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_fini(dev);
        intel_fbc_cleanup_cfb(dev_priv);
  
        intel_power_domains_fini(dev_priv);
@@@ -1570,15 -1639,15 +1636,15 @@@ const struct drm_ioctl_desc i915_ioctls
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
  #include "i915_trace.h"
  #include "intel_drv.h"
  
 -#include <linux/apple-gmux.h>
  #include <linux/console.h>
  #include <linux/module.h>
  #include <linux/pm_runtime.h>
 -#include <linux/vgaarb.h>
  #include <linux/vga_switcheroo.h>
  #include <drm/drm_crtc_helper.h>
  
@@@ -298,22 -300,26 +298,26 @@@ static const struct intel_device_info i
  static const struct intel_device_info intel_broadwell_d_info = {
        BDW_FEATURES,
        .gen = 8,
+       .is_broadwell = 1,
  };
  
  static const struct intel_device_info intel_broadwell_m_info = {
        BDW_FEATURES,
        .gen = 8, .is_mobile = 1,
+       .is_broadwell = 1,
  };
  
  static const struct intel_device_info intel_broadwell_gt3d_info = {
        BDW_FEATURES,
        .gen = 8,
+       .is_broadwell = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
  };
  
  static const struct intel_device_info intel_broadwell_gt3m_info = {
        BDW_FEATURES,
        .gen = 8, .is_mobile = 1,
+       .is_broadwell = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
  };
  
@@@ -528,9 -534,9 +532,9 @@@ void intel_detect_pch(struct drm_devic
        pci_dev_put(pch);
  }
  
- bool i915_semaphore_is_enabled(struct drm_device *dev)
+ bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
  {
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return false;
  
        if (i915.semaphores >= 0)
        if (i915.enable_execlists)
                return false;
  
-       /* Until we get further testing... */
-       if (IS_GEN8(dev))
-               return false;
  #ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+       if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
                return false;
  #endif
  
@@@ -608,7 -610,7 +608,7 @@@ static int i915_drm_suspend(struct drm_
  
        intel_guc_suspend(dev);
  
-       intel_suspend_gt_powersave(dev);
+       intel_suspend_gt_powersave(dev_priv);
  
        intel_display_suspend(dev);
  
        opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
        intel_opregion_notify_adapter(dev, opregion_target_state);
  
-       intel_uncore_forcewake_reset(dev, false);
+       intel_uncore_forcewake_reset(dev_priv, false);
        intel_opregion_fini(dev);
  
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@@ -775,7 -777,7 +775,7 @@@ static int i915_drm_resume(struct drm_d
  
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev);
+               dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
  
        intel_dp_mst_resume(dev);
@@@ -868,9 -870,9 +868,9 @@@ static int i915_drm_resume_early(struc
                DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
                          ret);
  
-       intel_uncore_early_sanitize(dev, true);
+       intel_uncore_early_sanitize(dev_priv, true);
  
-       if (IS_BROXTON(dev)) {
+       if (IS_BROXTON(dev_priv)) {
                if (!dev_priv->suspended_to_idle)
                        gen9_sanitize_dc_state(dev_priv);
                bxt_disable_dc9(dev_priv);
                hsw_disable_pc8(dev_priv);
        }
  
-       intel_uncore_sanitize(dev);
+       intel_uncore_sanitize(dev_priv);
  
        if (IS_BROXTON(dev_priv) ||
            !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@@ -921,14 -923,14 +921,14 @@@ int i915_resume_switcheroo(struct drm_d
   *   - re-init interrupt state
   *   - re-init display
   */
- int i915_reset(struct drm_device *dev)
+ int i915_reset(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
        struct i915_gpu_error *error = &dev_priv->gpu_error;
        unsigned reset_counter;
        int ret;
  
-       intel_reset_gt_powersave(dev);
+       intel_reset_gt_powersave(dev_priv);
  
        mutex_lock(&dev->struct_mutex);
  
  
        i915_gem_reset(dev);
  
-       ret = intel_gpu_reset(dev, ALL_ENGINES);
+       ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
  
        /* Also reset the gpu hangman. */
        if (error->stop_rings != 0) {
         * of re-init after reset.
         */
        if (INTEL_INFO(dev)->gen > 5)
-               intel_enable_gt_powersave(dev);
+               intel_enable_gt_powersave(dev_priv);
  
        return 0;
  
@@@ -1028,7 -1030,13 +1028,7 @@@ static int i915_pci_probe(struct pci_de
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;
  
 -      /*
 -       * apple-gmux is needed on dual GPU MacBook Pro
 -       * to probe the panel if we're the inactive GPU.
 -       */
 -      if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
 -          apple_gmux_present() && pdev != vga_default_device() &&
 -          !vga_switcheroo_handler_flags())
 +      if (vga_switcheroo_client_probe_defer(pdev))
                return -EPROBE_DEFER;
  
        return drm_get_pci_dev(pdev, ent, &driver);
@@@ -1107,6 -1115,49 +1107,49 @@@ static int i915_pm_resume(struct devic
        return i915_drm_resume(drm_dev);
  }
  
+ /* freeze: before creating the hibernation_image */
+ static int i915_pm_freeze(struct device *dev)
+ {
+       return i915_pm_suspend(dev);
+ }
+ static int i915_pm_freeze_late(struct device *dev)
+ {
+       int ret;
+       ret = i915_pm_suspend_late(dev);
+       if (ret)
+               return ret;
+       ret = i915_gem_freeze_late(dev_to_i915(dev));
+       if (ret)
+               return ret;
+       return 0;
+ }
+ /* thaw: called after creating the hibernation image, but before turning off. */
+ static int i915_pm_thaw_early(struct device *dev)
+ {
+       return i915_pm_resume_early(dev);
+ }
+ static int i915_pm_thaw(struct device *dev)
+ {
+       return i915_pm_resume(dev);
+ }
+ /* restore: called after loading the hibernation image. */
+ static int i915_pm_restore_early(struct device *dev)
+ {
+       return i915_pm_resume_early(dev);
+ }
+ static int i915_pm_restore(struct device *dev)
+ {
+       return i915_pm_resume(dev);
+ }
  /*
   * Save all Gunit registers that may be lost after a D3 and a subsequent
   * S0i[R123] transition. The list of registers needing a save/restore is
@@@ -1470,7 -1521,7 +1513,7 @@@ static int intel_runtime_suspend(struc
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
  
-       if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+       if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
                return -ENODEV;
  
        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
  
        intel_guc_suspend(dev);
  
-       intel_suspend_gt_powersave(dev);
+       intel_suspend_gt_powersave(dev_priv);
        intel_runtime_pm_disable_interrupts(dev_priv);
  
        ret = 0;
                return ret;
        }
  
-       intel_uncore_forcewake_reset(dev, false);
+       intel_uncore_forcewake_reset(dev_priv, false);
  
        enable_rpm_wakeref_asserts(dev_priv);
        WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@@ -1612,7 -1663,7 +1655,7 @@@ static int intel_runtime_resume(struct 
         * we can do is to hope that things will still work (and disable RPM).
         */
        i915_gem_init_swizzling(dev);
-       gen6_update_ring_freq(dev);
+       gen6_update_ring_freq(dev_priv);
  
        intel_runtime_pm_enable_interrupts(dev_priv);
  
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_init(dev_priv);
  
-       intel_enable_gt_powersave(dev);
+       intel_enable_gt_powersave(dev_priv);
  
        enable_rpm_wakeref_asserts(dev_priv);
  
@@@ -1661,14 -1712,14 +1704,14 @@@ static const struct dev_pm_ops i915_pm_
         * @restore, @restore_early : called after rebooting and restoring the
         *                            hibernation image [PMSG_RESTORE]
         */
-       .freeze = i915_pm_suspend,
-       .freeze_late = i915_pm_suspend_late,
-       .thaw_early = i915_pm_resume_early,
-       .thaw = i915_pm_resume,
+       .freeze = i915_pm_freeze,
+       .freeze_late = i915_pm_freeze_late,
+       .thaw_early = i915_pm_thaw_early,
+       .thaw = i915_pm_thaw,
        .poweroff = i915_pm_suspend,
        .poweroff_late = i915_pm_poweroff_late,
-       .restore_early = i915_pm_resume_early,
-       .restore = i915_pm_resume,
+       .restore_early = i915_pm_restore_early,
+       .restore = i915_pm_restore,
  
        /* S0ix (via runtime suspend) event handlers */
        .runtime_suspend = intel_runtime_suspend,
@@@ -66,7 -66,7 +66,7 @@@
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
- #define DRIVER_DATE           "20160425"
+ #define DRIVER_DATE           "20160522"
  
  #undef WARN_ON
  /* Many gcc seem to no see through this and fall over :( */
@@@ -324,6 -324,12 +324,12 @@@ struct i915_hotplug 
                            &dev->mode_config.plane_list,       \
                            base.head)
  
+ #define for_each_intel_plane_mask(dev, intel_plane, plane_mask)               \
+       list_for_each_entry(intel_plane, &dev->mode_config.plane_list,  \
+                           base.head)                                  \
+               for_each_if ((plane_mask) &                             \
+                            (1 << drm_plane_index(&intel_plane->base)))
  #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)    \
        list_for_each_entry(intel_plane,                                \
                            &(dev)->mode_config.plane_list,             \
  #define for_each_intel_crtc(dev, intel_crtc) \
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
  
+ #define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
+               for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
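
A sketch of how the new mask iterators read at a call site; the mask and the
per-crtc helper here are hypothetical stand-ins:

	struct intel_crtc *crtc;

	/* Visit only the CRTCs whose bit is set in update_mask; the bit
	 * positions follow drm_crtc_index(). */
	for_each_intel_crtc_mask(dev, crtc, update_mask)
		example_update_one_crtc(crtc);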
  #define for_each_intel_encoder(dev, intel_encoder)            \
        list_for_each_entry(intel_encoder,                      \
                            &(dev)->mode_config.encoder_list,   \
@@@ -588,6 -598,7 +598,7 @@@ struct drm_i915_display_funcs 
                                       struct intel_crtc_state *newstate);
        void (*initial_watermarks)(struct intel_crtc_state *cstate);
        void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+       int (*compute_global_watermarks)(struct drm_atomic_state *state);
        void (*update_wm)(struct drm_crtc *crtc);
        int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
        void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
                          struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_request *req,
                          uint32_t flags);
-       void (*hpd_irq_setup)(struct drm_device *dev);
+       void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@@ -735,6 -746,7 +746,7 @@@ struct intel_csr 
        func(is_valleyview) sep \
        func(is_cherryview) sep \
        func(is_haswell) sep \
+       func(is_broadwell) sep \
        func(is_skylake) sep \
        func(is_broxton) sep \
        func(is_kabylake) sep \
  struct intel_device_info {
        u32 display_mmio_offset;
        u16 device_id;
-       u8 num_pipes:3;
+       u8 num_pipes;
        u8 num_sprites[I915_MAX_PIPES];
        u8 gen;
+       u16 gen_mask;
        u8 ring_mask; /* Rings supported by the HW */
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
        /* Register offsets for the various display pipes and transcoders */
@@@ -851,6 -864,9 +864,9 @@@ struct intel_context 
        struct i915_ctx_hang_stats hang_stats;
        struct i915_hw_ppgtt *ppgtt;
  
+       /* Unique identifier for this context, used by the hw for tracking */
+       unsigned hw_id;
        /* Legacy ring buffer submission */
        struct {
                struct drm_i915_gem_object *rcs_state;
                struct i915_vma *lrc_vma;
                u64 lrc_desc;
                uint32_t *lrc_reg_state;
+               bool initialised;
        } engine[I915_NUM_ENGINES];
  
        struct list_head link;
@@@ -1488,6 -1505,7 +1505,7 @@@ struct intel_vbt_data 
                bool present;
                bool active_low_pwm;
                u8 min_brightness;      /* min_brightness/255 of max */
+               enum intel_backlight_type type;
        } backlight;
  
        /* MIPI DSI */
@@@ -1580,7 -1598,7 +1598,7 @@@ struct skl_ddb_allocation 
  };
  
  struct skl_wm_values {
-       bool dirty[I915_MAX_PIPES];
+       unsigned dirty_pipes;
        struct skl_ddb_allocation ddb;
        uint32_t wm_linetime[I915_MAX_PIPES];
        uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
@@@ -1838,6 -1856,13 +1856,13 @@@ struct drm_i915_private 
        DECLARE_HASHTABLE(mm_structs, 7);
        struct mutex mm_lock;
  
+       /* The hw wants to have a stable context identifier for the lifetime
+        * of the context (for OA, PASID, faults, etc). This is limited
+        * in execlists to 21 bits.
+        */
+       struct ida context_hw_ida;
+ #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
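
The context_hw_ida above is sized so that every live context fits in the
21-bit id field that execlists can express. A sketch of the allocation it
enables, assuming the kernel's IDA API (the helper name is hypothetical):

	static int example_assign_hw_id(struct drm_i915_private *dev_priv,
					unsigned *out)
	{
		int ret;

		/* Ids are recycled when a context is freed, so the space
		 * only has to cover contexts alive at the same time. */
		ret = ida_simple_get(&dev_priv->context_hw_ida, 0,
				     MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;

		*out = ret;
		return 0;
	}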
        /* Kernel Modesetting */
  
        struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
                 */
                uint16_t skl_latency[8];
  
-               /* Committed wm config */
-               struct intel_wm_config config;
                /*
                 * The skl_wm_values structure is a bit too big for stack
                 * allocation, so we keep the staging struct where we store
                 * cstate->wm.need_postvbl_update.
                 */
                struct mutex wm_mutex;
+               /*
+                * Set during HW readout of watermarks/DDB.  Some platforms
+                * need to know when we're still using BIOS-provided values
+                * (which we don't fully trust).
+                */
+               bool distrust_bios_wm;
        } wm;
  
        struct i915_runtime_pm pm;
@@@ -2227,9 -2256,75 +2256,75 @@@ struct drm_i915_gem_object 
  };
  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
  
- void i915_gem_track_fb(struct drm_i915_gem_object *old,
-                      struct drm_i915_gem_object *new,
-                      unsigned frontbuffer_bits);
+ /*
+  * Optimised SGL iterator for GEM objects
+  */
+ static __always_inline struct sgt_iter {
+       struct scatterlist *sgp;
+       union {
+               unsigned long pfn;
+               dma_addr_t dma;
+       };
+       unsigned int curr;
+       unsigned int max;
+ } __sgt_iter(struct scatterlist *sgl, bool dma) {
+       struct sgt_iter s = { .sgp = sgl };
+       if (s.sgp) {
+               s.max = s.curr = s.sgp->offset;
+               s.max += s.sgp->length;
+               if (dma)
+                       s.dma = sg_dma_address(s.sgp);
+               else
+                       s.pfn = page_to_pfn(sg_page(s.sgp));
+       }
+       return s;
+ }
+ /**
+  * __sg_next - return the next scatterlist entry in a list
+  * @sg:               The current sg entry
+  *
+  * Description:
+  *   If the entry is the last, return NULL; otherwise, step to the next
+  *   element in the array (@sg@+1). If that's a chain pointer, follow it;
+  *   otherwise just return the pointer to the current element.
+  **/
+ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
+ {
+ #ifdef CONFIG_DEBUG_SG
+       BUG_ON(sg->sg_magic != SG_MAGIC);
+ #endif
+       return sg_is_last(sg) ? NULL :
+               likely(!sg_is_chain(++sg)) ? sg :
+               sg_chain_ptr(sg);
+ }
+ /**
+  * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
+  * @__dmap:   DMA address (output)
+  * @__iter:   'struct sgt_iter' (iterator state, internal)
+  * @__sgt:    sg_table to iterate over (input)
+  */
+ #define for_each_sgt_dma(__dmap, __iter, __sgt)                               \
+       for ((__iter) = __sgt_iter((__sgt)->sgl, true);                 \
+            ((__dmap) = (__iter).dma + (__iter).curr);                 \
+            (((__iter).curr += PAGE_SIZE) < (__iter).max) ||           \
+            ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
+ /**
+  * for_each_sgt_page - iterate over the pages of the given sg_table
+  * @__pp:     page pointer (output)
+  * @__iter:   'struct sgt_iter' (iterator state, internal)
+  * @__sgt:    sg_table to iterate over (input)
+  */
+ #define for_each_sgt_page(__pp, __iter, __sgt)                                \
+       for ((__iter) = __sgt_iter((__sgt)->sgl, false);                \
+            ((__pp) = (__iter).pfn == 0 ? NULL :                       \
+             pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
+            (((__iter).curr += PAGE_SIZE) < (__iter).max) ||           \
+            ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
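
How the new iterators are meant to be used; a minimal sketch assuming the
object's backing storage lives in obj->pages as elsewhere in the driver (the
loop bodies are illustrative):

	static void example_walk_backing_store(struct drm_i915_gem_object *obj)
	{
		struct sgt_iter iter;
		struct page *page;
		dma_addr_t dma;

		/* CPU view: one struct page per PAGE_SIZE chunk. */
		for_each_sgt_page(page, iter, obj->pages)
			set_page_dirty(page);

		/* DMA view: the bus address of each PAGE_SIZE chunk,
		 * e.g. for filling GTT PTEs. */
		for_each_sgt_dma(dma, iter, obj->pages)
			(void)dma;
	}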
  
  /**
   * Request queue structure.
@@@ -2278,6 -2373,9 +2373,9 @@@ struct drm_i915_gem_request 
        /** Position in the ringbuffer of the end of the whole request */
        u32 tail;
  
+       /** Preallocate space in the ringbuffer for emitting the request */
+       u32 reserved_space;
        /**
         * Context and ring buffer related to this request
         * Contexts are refcounted, so when this request is associated with a
        struct intel_context *ctx;
        struct intel_ringbuffer *ringbuf;
  
+       /**
+        * Context related to the previous request.
+        * As the contexts are accessed by the hardware until the switch is
+        * completed to a new context, the hardware may still be writing
+        * to the context object after the breadcrumb is visible. We must
+        * not unpin/unbind/prune that object whilst still active and so
+        * we keep the previous context pinned until the following (this)
+        * request is retired.
+        */
+       struct intel_context *previous_context;
        /** Batch buffer related to this request if any (used for
            error state dump only) */
        struct drm_i915_gem_object *batch_obj;
        /** Execlists no. of times this request has been sent to the ELSP */
        int elsp_submitted;
  
+       /** Execlists context hardware id. */
+       unsigned ctx_hw_id;
  };
  
  struct drm_i915_gem_request * __must_check
@@@ -2359,23 -2470,9 +2470,9 @@@ i915_gem_request_reference(struct drm_i
  static inline void
  i915_gem_request_unreference(struct drm_i915_gem_request *req)
  {
-       WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
        kref_put(&req->ref, i915_gem_request_free);
  }
  
- static inline void
- i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
- {
-       struct drm_device *dev;
-       if (!req)
-               return;
-       dev = req->engine->dev;
-       if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
-               mutex_unlock(&dev->struct_mutex);
- }
  static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
                                           struct drm_i915_gem_request *src)
  {
@@@ -2503,9 -2600,29 +2600,29 @@@ struct drm_i915_cmd_table 
  #define INTEL_INFO(p)         (&__I915__(p)->info)
  #define INTEL_GEN(p)  (INTEL_INFO(p)->gen)
  #define INTEL_DEVID(p)        (INTEL_INFO(p)->device_id)
- #define INTEL_REVID(p)        (__I915__(p)->dev->pdev->revision)
  
  #define REVID_FOREVER         0xff
+ #define INTEL_REVID(p)        (__I915__(p)->dev->pdev->revision)
+ #define GEN_FOREVER (0)
+ /*
+  * Returns true if Gen is in inclusive range [Start, End].
+  *
+  * Use GEN_FOREVER for an unbound start and/or end.
+  */
+ #define IS_GEN(p, s, e) ({ \
+       unsigned int __s = (s), __e = (e); \
+       BUILD_BUG_ON(!__builtin_constant_p(s)); \
+       BUILD_BUG_ON(!__builtin_constant_p(e)); \
+       if ((__s) != GEN_FOREVER) \
+               __s = (s) - 1; \
+       if ((__e) == GEN_FOREVER) \
+               __e = BITS_PER_LONG - 1; \
+       else \
+               __e = (e) - 1; \
+       !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+ })
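
For illustration, a hypothetical call site exercising both the bounded and
the GEN_FOREVER forms (not lines from this patch):

	static bool example_outside_mid_gens(struct drm_i915_private *dev_priv)
	{
		/* gen_mask stores BIT(gen - 1), so each range check is a
		 * single mask test against a GENMASK of the bounds. */
		return IS_GEN(dev_priv, GEN_FOREVER, 5) ||	/* gen <= 5 */
		       IS_GEN(dev_priv, 8, GEN_FOREVER);	/* gen >= 8 */
	}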
  /*
   * Return true if revision is in range [since,until] inclusive.
   *
  #define IS_VALLEYVIEW(dev)    (INTEL_INFO(dev)->is_valleyview)
  #define IS_CHERRYVIEW(dev)    (INTEL_INFO(dev)->is_cherryview)
  #define IS_HASWELL(dev)       (INTEL_INFO(dev)->is_haswell)
- #define IS_BROADWELL(dev)     (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev))
+ #define IS_BROADWELL(dev)     (INTEL_INFO(dev)->is_broadwell)
  #define IS_SKYLAKE(dev)       (INTEL_INFO(dev)->is_skylake)
  #define IS_BROXTON(dev)               (INTEL_INFO(dev)->is_broxton)
  #define IS_KABYLAKE(dev)      (INTEL_INFO(dev)->is_kabylake)
   * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
   * chips, etc.).
   */
- #define IS_GEN2(dev)  (INTEL_INFO(dev)->gen == 2)
- #define IS_GEN3(dev)  (INTEL_INFO(dev)->gen == 3)
- #define IS_GEN4(dev)  (INTEL_INFO(dev)->gen == 4)
- #define IS_GEN5(dev)  (INTEL_INFO(dev)->gen == 5)
- #define IS_GEN6(dev)  (INTEL_INFO(dev)->gen == 6)
- #define IS_GEN7(dev)  (INTEL_INFO(dev)->gen == 7)
- #define IS_GEN8(dev)  (INTEL_INFO(dev)->gen == 8)
- #define IS_GEN9(dev)  (INTEL_INFO(dev)->gen == 9)
+ #define IS_GEN2(dev)  (INTEL_INFO(dev)->gen_mask & BIT(1))
+ #define IS_GEN3(dev)  (INTEL_INFO(dev)->gen_mask & BIT(2))
+ #define IS_GEN4(dev)  (INTEL_INFO(dev)->gen_mask & BIT(3))
+ #define IS_GEN5(dev)  (INTEL_INFO(dev)->gen_mask & BIT(4))
+ #define IS_GEN6(dev)  (INTEL_INFO(dev)->gen_mask & BIT(5))
+ #define IS_GEN7(dev)  (INTEL_INFO(dev)->gen_mask & BIT(6))
+ #define IS_GEN8(dev)  (INTEL_INFO(dev)->gen_mask & BIT(7))
+ #define IS_GEN9(dev)  (INTEL_INFO(dev)->gen_mask & BIT(8))
  
  #define RENDER_RING           (1<<RCS)
  #define BSD_RING              (1<<VCS)
                                 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
                                 IS_KABYLAKE(dev) || IS_BROXTON(dev))
  #define HAS_RC6(dev)          (INTEL_INFO(dev)->gen >= 6)
- #define HAS_RC6p(dev)         (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+ #define HAS_RC6p(dev)         (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
  
  #define HAS_CSR(dev)  (IS_GEN9(dev))
  
@@@ -2740,6 -2857,9 +2857,9 @@@ extern int i915_max_ioctl
  extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
  extern int i915_resume_switcheroo(struct drm_device *dev);
  
+ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+                               int enable_ppgtt);
  /* i915_dma.c */
  void __printf(3, 4)
  __i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@@ -2760,9 -2880,9 +2880,9 @@@ extern void i915_driver_postclose(struc
  extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
  #endif
- extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
- extern bool intel_has_gpu_reset(struct drm_device *dev);
- extern int i915_reset(struct drm_device *dev);
+ extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
+ extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
+ extern int i915_reset(struct drm_i915_private *dev_priv);
  extern int intel_guc_reset(struct drm_i915_private *dev_priv);
  extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
  extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@@ -2772,30 -2892,33 +2892,33 @@@ extern void i915_update_gfx_val(struct 
  int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
  
  /* intel_hotplug.c */
- void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+                          u32 pin_mask, u32 long_mask);
  void intel_hpd_init(struct drm_i915_private *dev_priv);
  void intel_hpd_init_work(struct drm_i915_private *dev_priv);
  void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
  bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
  
  /* i915_irq.c */
- void i915_queue_hangcheck(struct drm_device *dev);
+ void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
  __printf(3, 4)
- void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+ void i915_handle_error(struct drm_i915_private *dev_priv,
+                      u32 engine_mask,
                       const char *fmt, ...);
  
  extern void intel_irq_init(struct drm_i915_private *dev_priv);
  int intel_irq_install(struct drm_i915_private *dev_priv);
  void intel_irq_uninstall(struct drm_i915_private *dev_priv);
  
- extern void intel_uncore_sanitize(struct drm_device *dev);
- extern void intel_uncore_early_sanitize(struct drm_device *dev,
+ extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
+ extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                                        bool restore_forcewake);
- extern void intel_uncore_init(struct drm_device *dev);
+ extern void intel_uncore_init(struct drm_i915_private *dev_priv);
  extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
  extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
- extern void intel_uncore_fini(struct drm_device *dev);
- extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+ extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
+ extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+                                        bool restore);
  const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
  void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                enum forcewake_domains domains);
@@@ -2811,9 -2934,9 +2934,9 @@@ void intel_uncore_forcewake_put__locked
  u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
  
  void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
- static inline bool intel_vgpu_active(struct drm_device *dev)
+ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
  {
-       return to_i915(dev)->vgpu.active;
+       return dev_priv->vgpu.active;
  }
  
  void
@@@ -2909,7 -3032,7 +3032,7 @@@ int i915_gem_set_tiling(struct drm_devi
                        struct drm_file *file_priv);
  int i915_gem_get_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_device *dev);
void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
  int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file);
  int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@@ -2919,11 -3042,13 +3042,13 @@@ int i915_gem_wait_ioctl(struct drm_devi
  void i915_gem_load_init(struct drm_device *dev);
  void i915_gem_load_cleanup(struct drm_device *dev);
  void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+ int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
  void *i915_gem_object_alloc(struct drm_device *dev);
  void i915_gem_object_free(struct drm_i915_gem_object *obj);
  void i915_gem_object_init(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_object_ops *ops);
- struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
                                                  size_t size);
  struct drm_i915_gem_object *i915_gem_object_create_from_data(
                struct drm_device *dev, const void *data, size_t size);
@@@ -3054,6 -3179,11 +3179,11 @@@ int i915_gem_dumb_create(struct drm_fil
                         struct drm_mode_create_dumb *args);
  int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
+ void i915_gem_track_fb(struct drm_i915_gem_object *old,
+                      struct drm_i915_gem_object *new,
+                      unsigned frontbuffer_bits);
  /**
   * Returns true if seq1 is later than seq2.
   */
@@@ -3081,13 -3211,13 +3211,13 @@@ static inline bool i915_gem_request_com
                                 req->seqno);
  }
  
- int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+ int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
  int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
  
  struct drm_i915_gem_request *
  i915_gem_find_active_request(struct intel_engine_cs *engine);
  
- bool i915_gem_retire_requests(struct drm_device *dev);
+ bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
  void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
  
  static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@@ -3147,7 -3277,6 +3277,6 @@@ bool i915_gem_clflush_object(struct drm
  int __must_check i915_gem_init(struct drm_device *dev);
  int i915_gem_init_engines(struct drm_device *dev);
  int __must_check i915_gem_init_hw(struct drm_device *dev);
- int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
  void i915_gem_init_swizzling(struct drm_device *dev);
  void i915_gem_cleanup_engines(struct drm_device *dev);
  int __must_check i915_gpu_idle(struct drm_device *dev);
@@@ -3215,8 -3344,6 +3344,6 @@@ bool i915_gem_obj_ggtt_bound_view(struc
  bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
                        struct i915_address_space *vm);
  
- unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-                               struct i915_address_space *vm);
  struct i915_vma *
  i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                    struct i915_address_space *vm);
@@@ -3251,14 -3378,8 +3378,8 @@@ static inline bool i915_gem_obj_ggtt_bo
        return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
  }
  
- static inline unsigned long
- i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
- {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       return i915_gem_obj_size(obj, &ggtt->base);
- }
+ unsigned long
+ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
  
  static inline int __must_check
  i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
                                   alignment, flags | PIN_GLOBAL);
  }
  
- static inline int
- i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
- {
-       return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
- }
  void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
                                     const struct i915_ggtt_view *view);
  static inline void
@@@ -3301,10 -3416,10 +3416,10 @@@ void i915_gem_object_save_bit_17_swizzl
  
  /* i915_gem_context.c */
  int __must_check i915_gem_context_init(struct drm_device *dev);
+ void i915_gem_context_lost(struct drm_i915_private *dev_priv);
  void i915_gem_context_fini(struct drm_device *dev);
  void i915_gem_context_reset(struct drm_device *dev);
  int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
- int i915_gem_context_enable(struct drm_i915_gem_request *req);
  void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
  int i915_switch_context(struct drm_i915_gem_request *req);
  struct intel_context *
@@@ -3335,6 -3450,8 +3450,8 @@@ int i915_gem_context_getparam_ioctl(str
                                    struct drm_file *file_priv);
  int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
+ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
+                                      struct drm_file *file);
  
  /* i915_gem_evict.c */
  int __must_check i915_gem_evict_something(struct drm_device *dev,
@@@ -3349,9 -3466,9 +3466,9 @@@ int __must_check i915_gem_evict_for_vma
  int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
  
  /* belongs in i915_gem_gtt.h */
- static inline void i915_gem_chipset_flush(struct drm_device *dev)
+ static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
  {
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                intel_gtt_chipset_flush();
  }
  
@@@ -3430,18 -3547,19 +3547,19 @@@ static inline void i915_error_state_buf
  {
        kfree(eb->buf);
  }
- void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+ void i915_capture_error_state(struct drm_i915_private *dev_priv,
+                             u32 engine_mask,
                              const char *error_msg);
  void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv);
  void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
  void i915_destroy_error_state(struct drm_device *dev);
  
- void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+ void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
  const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
  
  /* i915_cmd_parser.c */
- int i915_cmd_parser_get_version(void);
+ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
  int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
  void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
  bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@@@ -3492,7 -3610,7 +3610,7 @@@ bool intel_bios_is_port_hpd_inverted(st
  extern int intel_opregion_setup(struct drm_device *dev);
  extern void intel_opregion_init(struct drm_device *dev);
  extern void intel_opregion_fini(struct drm_device *dev);
- extern void intel_opregion_asle_intr(struct drm_device *dev);
+ extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
  extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                                         bool enable);
  extern int intel_opregion_notify_adapter(struct drm_device *dev,
@@@ -3502,7 -3620,9 +3620,9 @@@ extern int intel_opregion_get_panel_typ
  static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
  static inline void intel_opregion_init(struct drm_device *dev) { return; }
  static inline void intel_opregion_fini(struct drm_device *dev) { return; }
- static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+ static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+ {
+ }
  static inline int
  intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
  {
@@@ -3538,26 -3658,25 +3658,25 @@@ extern int intel_modeset_vga_set_state(
  extern void intel_display_resume(struct drm_device *dev);
  extern void i915_redisable_vga(struct drm_device *dev);
  extern void i915_redisable_vga_power_on(struct drm_device *dev);
- extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+ extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
  extern void intel_init_pch_refclk(struct drm_device *dev);
- extern void intel_set_rps(struct drm_device *dev, u8 val);
+ extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
  extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                  bool enable);
  extern void intel_detect_pch(struct drm_device *dev);
- extern int intel_enable_rc6(const struct drm_device *dev);
  
- extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+ extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
  int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
- int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
-                              struct drm_file *file);
  
  /* overlay */
- extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+ extern struct intel_overlay_error_state *
+ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
  extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_overlay_error_state *error);
  
- extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+ extern struct intel_display_error_state *
+ intel_display_capture_error_state(struct drm_i915_private *dev_priv);
  extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
@@@ -3586,6 -3705,24 +3705,24 @@@ void intel_sbi_write(struct drm_i915_pr
  u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
  void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
  
+ /* intel_dpio_phy.c */
+ void chv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 deemph_reg_value, u32 margin_reg_value,
+                             bool uniq_trans_scale);
+ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+                             bool reset);
+ void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
+ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+ void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+ void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 demph_reg_value, u32 preemph_reg_value,
+                             u32 uniqtranscale_reg_value, u32 tx3_demph);
+ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
+ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+ void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+
  int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
  int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
  
@@@ -3676,6 -3813,11 +3813,6 @@@ static inline i915_reg_t i915_vgacntrl_
                return VGACNTRL;
  }
  
 -static inline void __user *to_user_ptr(u64 address)
 -{
 -      return (void __user *)(uintptr_t)address;
 -}
 -
  static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
  {
        unsigned long j = msecs_to_jiffies(m);
@@@ -177,7 -177,7 +177,7 @@@ i915_gem_object_get_pages_phys(struct d
                vaddr += PAGE_SIZE;
        }
  
-       i915_gem_chipset_flush(obj->base.dev);
+       i915_gem_chipset_flush(to_i915(obj->base.dev));
  
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
@@@ -319,7 -319,7 +319,7 @@@ i915_gem_phys_pwrite(struct drm_i915_ge
  {
        struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
 -      char __user *user_data = to_user_ptr(args->data_ptr);
 +      char __user *user_data = u64_to_user_ptr(args->data_ptr);
        int ret = 0;
  
        /* We manually control the domain here and pretend that it
        }
  
        drm_clflush_virt_range(vaddr, args->size);
-       i915_gem_chipset_flush(dev);
+       i915_gem_chipset_flush(to_i915(dev));
  
  out:
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@@ -381,9 -381,9 +381,9 @@@ i915_gem_create(struct drm_file *file
                return -EINVAL;
  
        /* Allocate the new object */
-       obj = i915_gem_alloc_object(dev, size);
-       if (obj == NULL)
-               return -ENOMEM;
+       obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
  
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
@@@ -600,7 -600,7 +600,7 @@@ i915_gem_shmem_pread(struct drm_device 
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;
  
 -      user_data = to_user_ptr(args->data_ptr);
 +      user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
  
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@@ -687,7 -687,7 +687,7 @@@ i915_gem_pread_ioctl(struct drm_device 
                return 0;
  
        if (!access_ok(VERIFY_WRITE,
 -                     to_user_ptr(args->data_ptr),
 +                     u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;
  
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -779,7 -779,7 +779,7 @@@ i915_gem_gtt_pwrite_fast(struct drm_dev
        if (ret)
                goto out_unpin;
  
 -      user_data = to_user_ptr(args->data_ptr);
 +      user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
  
        offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
@@@ -903,7 -903,7 +903,7 @@@ i915_gem_shmem_pwrite(struct drm_devic
        int needs_clflush_before = 0;
        struct sg_page_iter sg_iter;
  
 -      user_data = to_user_ptr(args->data_ptr);
 +      user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
  
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@@ -1006,7 -1006,7 +1006,7 @@@ out
        }
  
        if (needs_clflush_after)
-               i915_gem_chipset_flush(dev);
+               i915_gem_chipset_flush(to_i915(dev));
        else
                obj->cache_dirty = true;
  
@@@ -1032,12 -1032,12 +1032,12 @@@ i915_gem_pwrite_ioctl(struct drm_devic
                return 0;
  
        if (!access_ok(VERIFY_READ,
 -                     to_user_ptr(args->data_ptr),
 +                     u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;
  
        if (likely(!i915.prefault_disable)) {
 -              ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
 +              ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
                                                   args->size);
                if (ret)
                        return -EFAULT;
        if (ret)
                goto put_rpm;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -1230,8 -1230,7 +1230,7 @@@ int __i915_wait_request(struct drm_i915
                        struct intel_rps_client *rps)
  {
        struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = req->i915;
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@@ -1413,6 -1412,13 +1412,13 @@@ static void i915_gem_request_retire(str
        list_del_init(&request->list);
        i915_gem_request_remove_from_client(request);
  
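+       /* The previous context remains pinned until this request is
+        * retired, as the hardware may access it until the switch away
+        * has completed (execlists only).
+        */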
+       if (request->previous_context) {
+               if (i915.enable_execlists)
+                       intel_lr_context_unpin(request->previous_context,
+                                              request->engine);
+       }
+
+       i915_gem_context_unreference(request->ctx);
        i915_gem_request_unreference(request);
  }
  
@@@ -1422,7 -1428,7 +1428,7 @@@ __i915_gem_request_retire__upto(struct 
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;
  
-       lockdep_assert_held(&engine->dev->struct_mutex);
+       lockdep_assert_held(&engine->i915->dev->struct_mutex);
  
        if (list_empty(&req->list))
                return;
@@@ -1611,7 -1617,7 +1617,7 @@@ i915_gem_set_domain_ioctl(struct drm_de
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -1659,7 -1665,7 +1665,7 @@@ i915_gem_sw_finish_ioctl(struct drm_dev
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -1703,10 -1709,10 +1709,10 @@@ i915_gem_mmap_ioctl(struct drm_device *
        if (args->flags & ~(I915_MMAP_WC))
                return -EINVAL;
  
 -      if (args->flags & I915_MMAP_WC && !cpu_has_pat)
 +      if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
                return -ENODEV;
  
 -      obj = drm_gem_object_lookup(dev, file, args->handle);
 +      obj = drm_gem_object_lookup(file, args->handle);
        if (obj == NULL)
                return -ENOENT;
  
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;
  
 -              down_write(&mm->mmap_sem);
 +              if (down_write_killable(&mm->mmap_sem)) {
 +                      drm_gem_object_unreference_unlocked(obj);
 +                      return -EINTR;
 +              }
                vma = find_vma(mm, addr);
                if (vma)
                        vma->vm_page_prot =
@@@ -1982,7 -1985,7 +1988,7 @@@ i915_gem_get_gtt_size(struct drm_devic
                return size;
  
        /* Previous chips need a power-of-two fence region when tiling */
-       if (INTEL_INFO(dev)->gen == 3)
+       if (IS_GEN3(dev))
                gtt_size = 1024*1024;
        else
                gtt_size = 512*1024;
@@@ -2072,7 -2075,7 +2078,7 @@@ i915_gem_mmap_gtt(struct drm_file *file
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -2162,7 -2165,8 +2168,8 @@@ i915_gem_object_invalidate(struct drm_i
  static void
  i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
  {
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       struct page *page;
        int ret;
  
        BUG_ON(obj->madv == __I915_MADV_PURGED);
        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;
  
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
+       for_each_sgt_page(page, sgt_iter, obj->pages) {
                if (obj->dirty)
                        set_page_dirty(page);
  
@@@ -2243,7 -2245,7 +2248,7 @@@ i915_gem_object_get_pages_gtt(struct dr
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        int ret;
  
  err_pages:
        sg_mark_end(sg);
-       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-               put_page(sg_page_iter_page(&sg_iter));
+       for_each_sgt_page(page, sgt_iter, st)
+               put_page(page);
        sg_free_table(st);
        kfree(st);
  
@@@ -2395,6 -2397,44 +2400,44 @@@ i915_gem_object_get_pages(struct drm_i9
        return 0;
  }
  
+ /* The 'mapping' part of i915_gem_object_pin_map() below */
+ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+ {
+       unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+       struct sg_table *sgt = obj->pages;
+       struct sgt_iter sgt_iter;
+       struct page *page;
+       struct page *stack_pages[32];
+       struct page **pages = stack_pages;
+       unsigned long i = 0;
+       void *addr;
+
+       /* A single page can always be kmapped */
+       if (n_pages == 1)
+               return kmap(sg_page(sgt->sgl));
+
+       if (n_pages > ARRAY_SIZE(stack_pages)) {
+               /* Too big for stack -- allocate temporary array instead */
+               pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+               if (!pages)
+                       return NULL;
+       }
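+
+       /* Flatten the scatterlist into a plain page array for vmap() */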
+       for_each_sgt_page(page, sgt_iter, sgt)
+               pages[i++] = page;
+
+       /* Check that we have the expected number of pages */
+       GEM_BUG_ON(i != n_pages);
+
+       addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+       if (pages != stack_pages)
+               drm_free_large(pages);
+
+       return addr;
+ }
+
+ /* get, pin, and map the pages of the object into kernel space */
  void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
  {
        int ret;
  
        i915_gem_object_pin_pages(obj);
  
-       if (obj->mapping == NULL) {
-               struct page **pages;
-               pages = NULL;
-               if (obj->base.size == PAGE_SIZE)
-                       obj->mapping = kmap(sg_page(obj->pages->sgl));
-               else
-                       pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
-                                              sizeof(*pages),
-                                              GFP_TEMPORARY);
-               if (pages != NULL) {
-                       struct sg_page_iter sg_iter;
-                       int n;
-                       n = 0;
-                       for_each_sg_page(obj->pages->sgl, &sg_iter,
-                                        obj->pages->nents, 0)
-                               pages[n++] = sg_page_iter_page(&sg_iter);
-                       obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
-                       drm_free_large(pages);
-               }
-               if (obj->mapping == NULL) {
+       if (!obj->mapping) {
+               obj->mapping = i915_gem_object_map(obj);
+               if (!obj->mapping) {
                        i915_gem_object_unpin_pages(obj);
                        return ERR_PTR(-ENOMEM);
                }
@@@ -2502,9 -2522,8 +2525,8 @@@ i915_gem_object_retire__read(struct drm
  }
  
  static int
- i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int ret;
  
                if (ret)
                        return ret;
        }
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev_priv);
  
        /* Finally reset hw state */
        for_each_engine(engine, dev_priv)
@@@ -2534,7 -2553,7 +2556,7 @@@ int i915_gem_set_seqno(struct drm_devic
        /* HWS page needs to be set less than what we
         * will inject to ring
         */
-       ret = i915_gem_init_seqno(dev, seqno - 1);
+       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;
  
  }
  
  int
- i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+ i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        /* reserve 0 for non-seqno */
        if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_init_seqno(dev, 0);
+               int ret = i915_gem_init_seqno(dev_priv, 0);
                if (ret)
                        return ret;
  
@@@ -2580,6 -2597,7 +2600,7 @@@ void __i915_add_request(struct drm_i915
        struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
+       u32 reserved_tail;
        int ret;
  
        if (WARN_ON(request == NULL))
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
-       intel_ring_reserved_space_use(ringbuf);
        request_start = intel_ring_get_tail(ringbuf);
+       reserved_tail = request->reserved_space;
+       request->reserved_space = 0;
+
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
  
-       i915_queue_hangcheck(engine->dev);
+       i915_queue_hangcheck(engine->i915);
  
        queue_delayed_work(dev_priv->wq,
                           &dev_priv->mm.retire_work,
                           round_jiffies_up_relative(HZ));
-       intel_mark_busy(dev_priv->dev);
+       intel_mark_busy(dev_priv);
  
        /* Sanity check that the reserved size was large enough. */
-       intel_ring_reserved_space_end(ringbuf);
+       ret = intel_ring_get_tail(ringbuf) - request_start;
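+       /* The tail may have wrapped past the point where request_start
+        * was sampled; unwrap by adding the ring size.
+        */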
+       if (ret < 0)
+               ret += ringbuf->size;
+       WARN_ONCE(ret > reserved_tail,
+                 "Not enough space reserved (%d bytes) "
+                 "for adding the request (%d bytes)\n",
+                 reserved_tail, ret);
  }
  
  static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@@ -2712,18 -2737,6 +2740,6 @@@ void i915_gem_request_free(struct kref 
  {
        struct drm_i915_gem_request *req = container_of(req_ref,
                                                 typeof(*req), ref);
-       struct intel_context *ctx = req->ctx;
-
-       if (req->file_priv)
-               i915_gem_request_remove_from_client(req);
-
-       if (ctx) {
-               if (i915.enable_execlists && ctx != req->i915->kernel_context)
-                       intel_lr_context_unpin(ctx, req->engine);
-
-               i915_gem_context_unreference(ctx);
-       }
-
        kmem_cache_free(req->i915->requests, req);
  }
  
@@@ -2732,7 -2745,7 +2748,7 @@@ __i915_gem_request_alloc(struct intel_e
                         struct intel_context *ctx,
                         struct drm_i915_gem_request **req_out)
  {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
        unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        int ret;
        if (req == NULL)
                return -ENOMEM;
  
-       ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+       ret = i915_gem_get_seqno(engine->i915, &req->seqno);
        if (ret)
                goto err;
  
        req->ctx  = ctx;
        i915_gem_context_reference(req->ctx);
  
-       if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(req);
-       else
-               ret = intel_ring_alloc_request_extras(req);
-       if (ret) {
-               i915_gem_context_unreference(req->ctx);
-               goto err;
-       }
-
        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
+       req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
        if (i915.enable_execlists)
-               ret = intel_logical_ring_reserve_space(req);
+               ret = intel_logical_ring_alloc_request_extras(req);
        else
-               ret = intel_ring_reserve_space(req);
-       if (ret) {
-               /*
-                * At this point, the request is fully allocated even if not
-                * fully prepared. Thus it can be cleaned up using the proper
-                * free code.
-                */
-               intel_ring_reserved_space_cancel(req->ringbuf);
-               i915_gem_request_unreference(req);
-               return ret;
-       }
+               ret = intel_ring_alloc_request_extras(req);
+       if (ret)
+               goto err_ctx;
  
        *req_out = req;
        return 0;
  
+ err_ctx:
+       i915_gem_context_unreference(ctx);
  err:
        kmem_cache_free(dev_priv->requests, req);
        return ret;
@@@ -2824,7 -2824,7 +2827,7 @@@ i915_gem_request_alloc(struct intel_eng
        int err;
  
        if (ctx == NULL)
-               ctx = to_i915(engine->dev)->kernel_context;
+               ctx = engine->i915->kernel_context;
        err = __i915_gem_request_alloc(engine, ctx, &req);
        return err ? ERR_PTR(err) : req;
  }
@@@ -2888,13 -2888,7 +2891,7 @@@ static void i915_gem_reset_engine_clean
                /* Ensure irq handler finishes or is cancelled. */
                tasklet_kill(&engine->irq_tasklet);
  
-               spin_lock_bh(&engine->execlist_lock);
-               /* list_splice_tail_init checks for empty lists */
-               list_splice_tail_init(&engine->execlist_queue,
-                                     &engine->execlist_retired_req_list);
-               spin_unlock_bh(&engine->execlist_lock);
-               intel_execlists_retire_requests(engine);
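+               /* With the irq tasklet stopped, nothing new can be
+                * submitted; cancel everything still on the execlist queue.
+                */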
+               intel_execlists_cancel_requests(engine);
        }
  
        /*
@@@ -3005,9 -2999,8 +3002,8 @@@ i915_gem_retire_requests_ring(struct in
  }
  
  bool
- i915_gem_retire_requests(struct drm_device *dev)
+ i915_gem_retire_requests(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        bool idle = true;
  
                        spin_lock_bh(&engine->execlist_lock);
                        idle &= list_empty(&engine->execlist_queue);
                        spin_unlock_bh(&engine->execlist_lock);
-                       intel_execlists_retire_requests(engine);
                }
        }
  
@@@ -3042,7 -3033,7 +3036,7 @@@ i915_gem_retire_work_handler(struct wor
        /* Come back later if the device is busy... */
        idle = false;
        if (mutex_trylock(&dev->struct_mutex)) {
-               idle = i915_gem_retire_requests(dev);
+               idle = i915_gem_retire_requests(dev_priv);
                mutex_unlock(&dev->struct_mutex);
        }
        if (!idle)
@@@ -3066,7 -3057,7 +3060,7 @@@ i915_gem_idle_work_handler(struct work_
         * Also locking seems to be fubar here, engine->request_list is protected
         * by dev->struct_mutex. */
  
-       intel_mark_idle(dev);
+       intel_mark_idle(dev_priv);
  
        if (mutex_trylock(&dev->struct_mutex)) {
                for_each_engine(engine, dev_priv)
@@@ -3096,14 -3087,8 +3090,8 @@@ i915_gem_object_flush_active(struct drm
                if (req == NULL)
                        continue;
  
-               if (list_empty(&req->list))
-                       goto retire;
-               if (i915_gem_request_completed(req, true)) {
-                       __i915_gem_request_retire__upto(req);
- retire:
+               if (i915_gem_request_completed(req, true))
                        i915_gem_object_retire__read(obj, i);
-               }
        }
  
        return 0;
@@@ -3147,7 -3132,7 +3135,7 @@@ i915_gem_wait_ioctl(struct drm_device *
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
        if (&obj->base == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
                        ret = __i915_wait_request(req[i], true,
                                                  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                                  to_rps_client(file));
-               i915_gem_request_unreference__unlocked(req[i]);
+               i915_gem_request_unreference(req[i]);
        }
        return ret;
  
@@@ -3211,7 -3196,7 +3199,7 @@@ __i915_gem_object_sync(struct drm_i915_
        if (i915_gem_request_completed(from_req, true))
                return 0;
  
-       if (!i915_semaphore_is_enabled(obj->base.dev)) {
+       if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
                ret = __i915_wait_request(from_req,
                                          i915->mm.interruptible,
@@@ -3345,6 -3330,17 +3333,17 @@@ static void i915_gem_object_finish_gtt(
                                            old_write_domain);
  }
  
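+ /* Release the kernel mapping of the vma's GTT range, if one exists; the
+  * vma must already be unpinned at this point.
+  */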
+ static void __i915_vma_iounmap(struct i915_vma *vma)
+ {
+       GEM_BUG_ON(vma->pin_count);
+
+       if (vma->iomap == NULL)
+               return;
+
+       io_mapping_unmap(vma->iomap);
+       vma->iomap = NULL;
+ }
+
  static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
  {
        struct drm_i915_gem_object *obj = vma->obj;
                ret = i915_gem_object_put_fence(obj);
                if (ret)
                        return ret;
+               __i915_vma_iounmap(vma);
        }
  
        trace_i915_vma_unbind(vma);
@@@ -3731,7 -3729,7 +3732,7 @@@ i915_gem_object_flush_cpu_write_domain(
                return;
  
        if (i915_gem_clflush_object(obj, obj->pin_display))
-               i915_gem_chipset_flush(obj->base.dev);
+               i915_gem_chipset_flush(to_i915(obj->base.dev));
  
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
@@@ -3929,7 -3927,7 +3930,7 @@@ out
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
                if (i915_gem_clflush_object(obj, true))
-                       i915_gem_chipset_flush(obj->base.dev);
+                       i915_gem_chipset_flush(to_i915(obj->base.dev));
        }
  
        return 0;
@@@ -3941,7 -3939,7 +3942,7 @@@ int i915_gem_get_caching_ioctl(struct d
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL)
                return -ENOENT;
  
@@@ -4002,7 -4000,7 +4003,7 @@@ int i915_gem_set_caching_ioctl(struct d
        if (ret)
                goto rpm_put;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -4198,7 -4196,7 +4199,7 @@@ i915_gem_ring_throttle(struct drm_devic
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  
-       i915_gem_request_unreference__unlocked(target);
+       i915_gem_request_unreference(target);
  
        return ret;
  }
@@@ -4372,7 -4370,7 +4373,7 @@@ i915_gem_busy_ioctl(struct drm_device *
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -4437,7 -4435,7 +4438,7 @@@ i915_gem_madvise_ioctl(struct drm_devic
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -4499,21 -4497,21 +4500,21 @@@ static const struct drm_i915_gem_object
        .put_pages = i915_gem_object_put_pages_gtt,
  };
  
- struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
                                                  size_t size)
  {
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
        gfp_t mask;
+       int ret;
  
        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
  
-       if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-               i915_gem_object_free(obj);
-               return NULL;
-       }
+       ret = drm_gem_object_init(dev, &obj->base, size);
+       if (ret)
+               goto fail;
  
        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
        trace_i915_gem_object_create(obj);
  
        return obj;
+
+ fail:
+       i915_gem_object_free(obj);
+       return ERR_PTR(ret);
  }
  
  static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@@@ -4655,16 -4658,12 +4661,12 @@@ struct i915_vma *i915_gem_obj_to_vma(st
  struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
                                           const struct i915_ggtt_view *view)
  {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
  
-       BUG_ON(!view);
+       GEM_BUG_ON(!view);
  
        list_for_each_entry(vma, &obj->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
        return NULL;
  }
@@@ -4706,9 -4705,10 +4708,10 @@@ i915_gem_suspend(struct drm_device *dev
        if (ret)
                goto err;
  
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev_priv);
  
        i915_gem_stop_engines(dev);
+       i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
  
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@@ -4727,37 -4727,6 +4730,6 @@@ err
        return ret;
  }
  
- int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
- {
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
-       int i, ret;
-
-       if (!HAS_L3_DPF(dev) || !remap_info)
-               return 0;
-
-       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
-       if (ret)
-               return ret;
-
-       /*
-        * Note: We do not worry about the concurrent register cacheline hang
-        * here because no other code should access these registers other than
-        * at initialization time.
-        */
-       for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-               intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-               intel_ring_emit(engine, remap_info[i]);
-       }
-
-       intel_ring_advance(engine);
-
-       return ret;
- }
-
  void i915_gem_init_swizzling(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -4862,7 -4831,7 +4834,7 @@@ i915_gem_init_hw(struct drm_device *dev
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
-       int ret, j;
+       int ret;
  
        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
         * on re-initialisation
         */
        ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
-       if (ret)
-               goto out;
-
-       /* Now it is safe to go back round and do everything else: */
-       for_each_engine(engine, dev_priv) {
-               struct drm_i915_gem_request *req;
-
-               req = i915_gem_request_alloc(engine, NULL);
-               if (IS_ERR(req)) {
-                       ret = PTR_ERR(req);
-                       break;
-               }
-
-               if (engine->id == RCS) {
-                       for (j = 0; j < NUM_L3_SLICES(dev); j++) {
-                               ret = i915_gem_l3_remap(req, j);
-                               if (ret)
-                                       goto err_request;
-                       }
-               }
-
-               ret = i915_ppgtt_init_ring(req);
-               if (ret)
-                       goto err_request;
-
-               ret = i915_gem_context_enable(req);
-               if (ret)
-                       goto err_request;
-
- err_request:
-               i915_add_request_no_flush(req);
-               if (ret) {
-                       DRM_ERROR("Failed to enable %s, error=%d\n",
-                                 engine->name, ret);
-                       i915_gem_cleanup_engines(dev);
-                       break;
-               }
-       }
  
  out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@@ -4977,9 -4908,6 +4911,6 @@@ int i915_gem_init(struct drm_device *de
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
  
-       i915.enable_execlists = intel_sanitize_enable_execlists(dev,
-                       i915.enable_execlists);
-
        mutex_lock(&dev->struct_mutex);
  
        if (!i915.enable_execlists) {
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  
-       ret = i915_gem_init_userptr(dev);
-       if (ret)
-               goto out_unlock;
+       i915_gem_init_userptr(dev_priv);
        i915_gem_init_ggtt(dev);
  
        ret = i915_gem_context_init(dev);
@@@ -5042,14 -4967,6 +4970,6 @@@ i915_gem_cleanup_engines(struct drm_dev
  
        for_each_engine(engine, dev_priv)
                dev_priv->gt.cleanup_engine(engine);
-
-       if (i915.enable_execlists)
-               /*
-                * Neither the BIOS, ourselves or any other kernel
-                * expects the system to be in execlists mode on startup,
-                * so we need to reset the GPU back to legacy mode.
-                */
-               intel_gpu_reset(dev, ALL_ENGINES);
  }
  
  static void
@@@ -5073,7 -4990,7 +4993,7 @@@ i915_gem_load_init_fences(struct drm_i9
        else
                dev_priv->num_fence_regs = 8;
  
-       if (intel_vgpu_active(dev))
+       if (intel_vgpu_active(dev_priv))
                dev_priv->num_fence_regs =
                                I915_READ(vgtif_reg(avail_rs.fence_num));
  
@@@ -5148,6 -5065,34 +5068,34 @@@ void i915_gem_load_cleanup(struct drm_d
        kmem_cache_destroy(dev_priv->objects);
  }
  
+ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+ {
+       struct drm_i915_gem_object *obj;
+
+       /* Called just before we write the hibernation image.
+        *
+        * We need to update the domain tracking to reflect that the CPU
+        * will be accessing all the pages to create and restore from the
+        * hibernation, and so upon restoration those pages will be in the
+        * CPU domain.
+        *
+        * To make sure the hibernation image contains the latest state,
+        * we update that state just before writing out the image.
+        */
+
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       return 0;
+ }
+
  void i915_gem_release(struct drm_device *dev, struct drm_file *file)
  {
        struct drm_i915_file_private *file_priv = file->driver_priv;
@@@ -5254,13 -5199,10 +5202,10 @@@ u64 i915_gem_obj_offset(struct drm_i915
  u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
  {
-       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
  
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
  
        WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@@ -5286,12 -5228,10 +5231,10 @@@ bool i915_gem_obj_bound(struct drm_i915
  bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
  {
-       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
  
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
+               if (vma->is_ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
                        return true;
@@@ -5310,23 -5250,18 +5253,18 @@@ bool i915_gem_obj_bound_any(struct drm_
        return false;
  }
  
- unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-                               struct i915_address_space *vm)
+ unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
  {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
  
-       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-       BUG_ON(list_empty(&o->vma_list));
+       GEM_BUG_ON(list_empty(&o->vma_list));
  
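+       /* Only the normal GGTT view counts towards the object's GGTT size;
+        * rotated and partial views are skipped.
+        */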
        list_for_each_entry(vma, &o->vma_list, obj_link) {
                if (vma->is_ggtt &&
-                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-                       continue;
-               if (vma->vm == vm)
+                   vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
                        return vma->node.size;
        }
        return 0;
  }
  
@@@ -5365,8 -5300,8 +5303,8 @@@ i915_gem_object_create_from_data(struc
        size_t bytes;
        int ret;
  
-       obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
-       if (IS_ERR_OR_NULL(obj))
+       obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+       if (IS_ERR(obj))
                return obj;
  
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@@ -489,7 -489,7 +489,7 @@@ i915_gem_execbuffer_relocate_entry(stru
                ret = relocate_entry_cpu(obj, reloc, target_offset);
        else if (obj->map_and_fenceable)
                ret = relocate_entry_gtt(obj, reloc, target_offset);
 -      else if (cpu_has_clflush)
 +      else if (static_cpu_has(X86_FEATURE_CLFLUSH))
                ret = relocate_entry_clflush(obj, reloc, target_offset);
        else {
                WARN_ONCE(1, "Impossible case in relocation handling\n");
@@@ -515,7 -515,7 +515,7 @@@ i915_gem_execbuffer_relocate_vma(struc
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int remain, ret;
  
 -      user_relocs = to_user_ptr(entry->relocs_ptr);
 +      user_relocs = u64_to_user_ptr(entry->relocs_ptr);
  
        remain = entry->relocation_count;
        while (remain) {
                                return ret;
  
                        if (r->presumed_offset != offset &&
 -                          __copy_to_user_inatomic(&user_relocs->presumed_offset,
 -                                                  &r->presumed_offset,
 -                                                  sizeof(r->presumed_offset))) {
 +                          __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
                                return -EFAULT;
                        }
  
@@@ -722,7 -724,7 +722,7 @@@ i915_gem_execbuffer_reserve(struct inte
        struct i915_address_space *vm;
        struct list_head ordered_vmas;
        struct list_head pinned_vmas;
-       bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+       bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
        int retry;
  
        i915_gem_retire_requests_ring(engine);
@@@ -867,7 -869,7 +867,7 @@@ i915_gem_execbuffer_relocate_slow(struc
                u64 invalid_offset = (u64)-1;
                int j;
  
 -              user_relocs = to_user_ptr(exec[i].relocs_ptr);
 +              user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
  
                if (copy_from_user(reloc+total, user_relocs,
                                   exec[i].relocation_count * sizeof(*reloc))) {
@@@ -963,7 -965,7 +963,7 @@@ i915_gem_execbuffer_move_to_gpu(struct 
        }
  
        if (flush_chipset)
-               i915_gem_chipset_flush(req->engine->dev);
+               i915_gem_chipset_flush(req->engine->i915);
  
        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();
@@@ -1012,7 -1014,7 +1012,7 @@@ validate_exec_list(struct drm_device *d
                invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
  
        for (i = 0; i < count; i++) {
 -              char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
 +              char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
                int length; /* limited by fault_in_pages_readable() */
  
                if (exec[i].flags & invalid_flags)
@@@ -1083,14 -1085,6 +1083,6 @@@ i915_gem_validate_context(struct drm_de
                return ERR_PTR(-EIO);
        }
  
-       if (i915.enable_execlists && !ctx->engine[engine->id].state) {
-               int ret = intel_lr_context_deferred_alloc(ctx, engine);
-               if (ret) {
-                       DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
-                       return ERR_PTR(ret);
-               }
-       }
-
        return ctx;
  }
  
@@@ -1125,7 -1119,7 +1117,7 @@@ i915_gem_execbuffer_move_to_active(stru
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
                        i915_gem_request_assign(&obj->last_fenced_req, req);
                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-                               struct drm_i915_private *dev_priv = to_i915(engine->dev);
+                               struct drm_i915_private *dev_priv = engine->i915;
                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
                                               &dev_priv->mm.fence_list);
                        }
@@@ -1695,7 -1689,7 +1687,7 @@@ i915_gem_execbuffer(struct drm_device *
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
 -                           to_user_ptr(args->buffers_ptr),
 +                           u64_to_user_ptr(args->buffers_ptr),
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                struct drm_i915_gem_exec_object __user *user_exec_list =
 -                      to_user_ptr(args->buffers_ptr);
 +                      u64_to_user_ptr(args->buffers_ptr);
  
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++) {
@@@ -1783,7 -1777,7 +1775,7 @@@ i915_gem_execbuffer2(struct drm_device 
                return -ENOMEM;
        }
        ret = copy_from_user(exec2_list,
 -                           to_user_ptr(args->buffers_ptr),
 +                           u64_to_user_ptr(args->buffers_ptr),
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                struct drm_i915_gem_exec_object2 __user *user_exec_list =
 -                                 to_user_ptr(args->buffers_ptr);
 +                                 u64_to_user_ptr(args->buffers_ptr);
                int i;
  
                for (i = 0; i < args->buffer_count; i++) {
@@@ -125,7 -125,7 +125,7 @@@ i915_gem_object_fence_ok(struct drm_i91
        if (INTEL_INFO(obj->base.dev)->gen >= 4)
                return true;
  
-       if (INTEL_INFO(obj->base.dev)->gen == 3) {
+       if (IS_GEN3(obj->base.dev)) {
                if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
                        return false;
        } else {
@@@ -166,7 -166,7 +166,7 @@@ i915_gem_set_tiling(struct drm_device *
        struct drm_i915_gem_object *obj;
        int ret = 0;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL)
                return -ENOENT;
  
                 */
                if (obj->map_and_fenceable &&
                    !i915_gem_object_fence_ok(obj, args->tiling_mode))
-                       ret = i915_gem_object_ggtt_unbind(obj);
+                       ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
  
                if (ret == 0) {
                        if (obj->pages &&
@@@ -297,7 -297,7 +297,7 @@@ i915_gem_get_tiling(struct drm_device *
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL)
                return -ENOENT;
  
  #include <linux/reservation.h>
  #include <linux/dma-buf.h>
  
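+ /* A flip work item is an mmio flip iff its mmio_work function has been
+  * initialised.
+  */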
+ static bool is_mmio_work(struct intel_flip_work *work)
+ {
+       return work->mmio_work.func;
+ }
+
  /* Primary plane formats for gen <= 3 */
  static const uint32_t i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
@@@ -117,20 -122,17 +122,17 @@@ static void ironlake_pfit_disable(struc
  static void ironlake_pfit_enable(struct intel_crtc *crtc);
  static void intel_modeset_setup_hw_state(struct drm_device *dev);
  static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+ static int ilk_max_pixel_rate(struct drm_atomic_state *state);
  
- typedef struct {
-       int     min, max;
- } intel_range_t;
-
- typedef struct {
-       int     dot_limit;
-       int     p2_slow, p2_fast;
- } intel_p2_t;
-
- typedef struct intel_limit intel_limit_t;
  struct intel_limit {
-       intel_range_t   dot, vco, n, m, m1, m2, p, p1;
-       intel_p2_t          p2;
+       struct {
+               int min, max;
+       } dot, vco, n, m, m1, m2, p, p1;
+       struct {
+               int dot_limit;
+               int p2_slow, p2_fast;
+       } p2;
  };
  
  /* returns HPLL frequency in kHz */
@@@ -185,6 -187,7 +187,7 @@@ intel_pch_rawclk(struct drm_i915_privat
  static int
  intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
  {
+       /* RAWCLK_FREQ_VLV register updated from power well code */
        return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
                                      CCK_DISPLAY_REF_CLOCK_CONTROL);
  }
@@@ -218,7 -221,7 +221,7 @@@ intel_g4x_hrawclk(struct drm_i915_priva
        }
  }
  
- static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+ void intel_update_rawclk(struct drm_i915_private *dev_priv)
  {
        if (HAS_PCH_SPLIT(dev_priv))
                dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@@ -255,7 -258,7 +258,7 @@@ intel_fdi_link_freq(struct drm_i915_pri
                return 270000;
  }
  
- static const intel_limit_t intel_limits_i8xx_dac = {
+ static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
                .p2_slow = 4, .p2_fast = 2 },
  };
  
- static const intel_limit_t intel_limits_i8xx_dvo = {
+ static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
                .p2_slow = 4, .p2_fast = 4 },
  };
  
- static const intel_limit_t intel_limits_i8xx_lvds = {
+ static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
                .p2_slow = 14, .p2_fast = 7 },
  };
  
- static const intel_limit_t intel_limits_i9xx_sdvo = {
+ static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
                .p2_slow = 10, .p2_fast = 5 },
  };
  
- static const intel_limit_t intel_limits_i9xx_lvds = {
+ static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
  };
  
  
- static const intel_limit_t intel_limits_g4x_sdvo = {
+ static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        },
  };
  
- static const intel_limit_t intel_limits_g4x_hdmi = {
+ static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
                .p2_slow = 10, .p2_fast = 5 },
  };
  
- static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+ static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        },
  };
  
- static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+ static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        },
  };
  
- static const intel_limit_t intel_limits_pineview_sdvo = {
+ static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
                .p2_slow = 10, .p2_fast = 5 },
  };
  
- static const intel_limit_t intel_limits_pineview_lvds = {
+ static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
   * We calculate clock using (register_value + 2) for N/M1/M2, so here
   * the range value for them is (actual_value - 2).
   */
- static const intel_limit_t intel_limits_ironlake_dac = {
+ static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
                .p2_slow = 10, .p2_fast = 5 },
  };
  
- static const intel_limit_t intel_limits_ironlake_single_lvds = {
+ static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
                .p2_slow = 14, .p2_fast = 14 },
  };
  
- static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+ static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
  };
  
  /* LVDS 100mhz refclk limits. */
- static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+ static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
                .p2_slow = 14, .p2_fast = 14 },
  };
  
- static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+ static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
                .p2_slow = 7, .p2_fast = 7 },
  };
  
- static const intel_limit_t intel_limits_vlv = {
+ static const struct intel_limit intel_limits_vlv = {
         /*
          * These are the data rate limits (measured in fast clocks)
          * since those are the strictest limits we have. The fast
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
  };
  
- static const intel_limit_t intel_limits_chv = {
+ static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
  };
  
- static const intel_limit_t intel_limits_bxt = {
+ static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
@@@ -581,7 -584,7 +584,7 @@@ static bool intel_pipe_will_have_type(c
   * divided-down version of it.
   */
  /* m1 is reserved as 0 in Pineview, n is a ring counter */
- static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
+ static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
  {
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
@@@ -598,7 -601,7 +601,7 @@@ static uint32_t i9xx_dpll_compute_m(str
        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
  }
  
- static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
+ static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
  {
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
        return clock->dot;
  }
  
- static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
+ static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
  {
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        return clock->dot / 5;
  }
  
- int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+ int chv_calc_dpll_params(int refclk, struct dpll *clock)
  {
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
   */
  
  static bool intel_PLL_is_valid(struct drm_device *dev,
-                              const intel_limit_t *limit,
-                              const intel_clock_t *clock)
+                              const struct intel_limit *limit,
+                              const struct dpll *clock)
  {
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
  }
  
  static int
- i9xx_select_p2_div(const intel_limit_t *limit,
+ i9xx_select_p2_div(const struct intel_limit *limit,
                   const struct intel_crtc_state *crtc_state,
                   int target)
  {
   * divider from @match_clock used for LVDS downclocking.
   */
  static bool
- i9xx_find_best_dpll(const intel_limit_t *limit,
+ i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
-                   int target, int refclk, intel_clock_t *match_clock,
-                   intel_clock_t *best_clock)
+                   int target, int refclk, struct dpll *match_clock,
+                   struct dpll *best_clock)
  {
        struct drm_device *dev = crtc_state->base.crtc->dev;
-       intel_clock_t clock;
+       struct dpll clock;
        int err = target;
  
        memset(best_clock, 0, sizeof(*best_clock));
   * divider from @match_clock used for LVDS downclocking.
   */
  static bool
- pnv_find_best_dpll(const intel_limit_t *limit,
+ pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
-                  int target, int refclk, intel_clock_t *match_clock,
-                  intel_clock_t *best_clock)
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
  {
        struct drm_device *dev = crtc_state->base.crtc->dev;
-       intel_clock_t clock;
+       struct dpll clock;
        int err = target;
  
        memset(best_clock, 0, sizeof(*best_clock));
   * divider from @match_clock used for LVDS downclocking.
   */
  static bool
- g4x_find_best_dpll(const intel_limit_t *limit,
+ g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
-                  int target, int refclk, intel_clock_t *match_clock,
-                  intel_clock_t *best_clock)
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
  {
        struct drm_device *dev = crtc_state->base.crtc->dev;
-       intel_clock_t clock;
+       struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 0.00585 */
   * best configuration and error found so far. Return the calculated error.
   */
  static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
-                              const intel_clock_t *calculated_clock,
-                              const intel_clock_t *best_clock,
+                              const struct dpll *calculated_clock,
+                              const struct dpll *best_clock,
                               unsigned int best_error_ppm,
                               unsigned int *error_ppm)
  {
   * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
   */
  static bool
- vlv_find_best_dpll(const intel_limit_t *limit,
+ vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
-                  int target, int refclk, intel_clock_t *match_clock,
-                  intel_clock_t *best_clock)
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
  {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
-       intel_clock_t clock;
+       struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
   * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
   */
  static bool
- chv_find_best_dpll(const intel_limit_t *limit,
+ chv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
-                  int target, int refclk, intel_clock_t *match_clock,
-                  intel_clock_t *best_clock)
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
  {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        unsigned int best_error_ppm;
-       intel_clock_t clock;
+       struct dpll clock;
        uint64_t m2;
        int found = false;
  
  }
  
  bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
-                       intel_clock_t *best_clock)
+                       struct dpll *best_clock)
  {
        int refclk = 100000;
-       const intel_limit_t *limit = &intel_limits_bxt;
+       const struct intel_limit *limit = &intel_limits_bxt;
  
        return chv_find_best_dpll(limit, crtc_state,
                                  target_clock, refclk, NULL, best_clock);
@@@ -1203,7 -1206,7 +1206,7 @@@ static void assert_fdi_tx_pll_enabled(s
        u32 val;
  
        /* ILK FDI PLL is always enabled */
-       if (INTEL_INFO(dev_priv)->gen == 5)
+       if (IS_GEN5(dev_priv))
                return;
  
        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@@ -2309,7 -2312,7 +2312,7 @@@ err_pm
        return ret;
  }
  
static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
  {
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_ggtt_view view;
@@@ -3110,17 -3113,12 +3113,12 @@@ intel_pipe_set_base_atomic(struct drm_c
        return -ENODEV;
  }
  
- static void intel_complete_page_flips(struct drm_device *dev)
+ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
  {
-       struct drm_crtc *crtc;
-       for_each_crtc(dev, crtc) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               enum plane plane = intel_crtc->plane;
+       struct intel_crtc *crtc;
  
-               intel_prepare_page_flip(dev, plane);
-               intel_finish_page_flip_plane(dev, plane);
-       }
+       for_each_intel_crtc(dev_priv->dev, crtc)
+               intel_finish_page_flip_cs(dev_priv, crtc->pipe);
  }
  
  static void intel_update_primary_planes(struct drm_device *dev)
        }
  }
  
- void intel_prepare_reset(struct drm_device *dev)
+ void intel_prepare_reset(struct drm_i915_private *dev_priv)
  {
        /* no reset support for gen2 */
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev_priv))
                return;
  
        /* reset doesn't touch the display */
-       if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                return;
  
-       drm_modeset_lock_all(dev);
+       drm_modeset_lock_all(dev_priv->dev);
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
-       intel_display_suspend(dev);
+       intel_display_suspend(dev_priv->dev);
  }
  
- void intel_finish_reset(struct drm_device *dev)
+ void intel_finish_reset(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        /*
         * Flips in the rings will be nuked by the reset,
         * so complete all pending flips so that user space
         * will get its events and not get stuck.
         */
-       intel_complete_page_flips(dev);
+       intel_complete_page_flips(dev_priv);
  
        /* no reset support for gen2 */
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev_priv))
                return;
  
        /* reset doesn't touch the display */
-       if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
                /*
                 * Flips in the rings have been nuked by the reset,
                 * so update the base address of all primary
                 * FIXME: Atomic will make this obsolete since we won't schedule
                 * CS-based flips (which might get lost in gpu resets) any more.
                 */
-               intel_update_primary_planes(dev);
+               intel_update_primary_planes(dev_priv->dev);
                return;
        }
  
        intel_runtime_pm_disable_interrupts(dev_priv);
        intel_runtime_pm_enable_interrupts(dev_priv);
  
-       intel_modeset_init_hw(dev);
+       intel_modeset_init_hw(dev_priv->dev);
  
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev);
+               dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
  
-       intel_display_resume(dev);
+       intel_display_resume(dev_priv->dev);
  
        intel_hpd_init(dev_priv);
  
-       drm_modeset_unlock_all(dev);
+       drm_modeset_unlock_all(dev_priv->dev);
  }
  
  static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
                return false;
  
        spin_lock_irq(&dev->event_lock);
-       pending = to_intel_crtc(crtc)->unpin_work != NULL;
+       pending = to_intel_crtc(crtc)->flip_work != NULL;
        spin_unlock_irq(&dev->event_lock);
  
        return pending;
@@@ -3803,7 -3799,7 +3799,7 @@@ bool intel_has_pending_fb_unpin(struct 
                if (atomic_read(&crtc->unpin_work_count) == 0)
                        continue;
  
-               if (crtc->unpin_work)
+               if (crtc->flip_work)
                        intel_wait_for_vblank(dev, crtc->pipe);
  
                return true;
  static void page_flip_completed(struct intel_crtc *intel_crtc)
  {
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-       struct intel_unpin_work *work = intel_crtc->unpin_work;
+       struct intel_flip_work *work = intel_crtc->flip_work;
  
-       /* ensure that the unpin work is consistent wrt ->pending. */
-       smp_rmb();
-       intel_crtc->unpin_work = NULL;
+       intel_crtc->flip_work = NULL;
  
        if (work->event)
                drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
        drm_crtc_vblank_put(&intel_crtc->base);
  
        wake_up_all(&dev_priv->pending_flip_queue);
-       queue_work(dev_priv->wq, &work->work);
+       queue_work(dev_priv->wq, &work->unpin_work);
  
        trace_i915_flip_complete(intel_crtc->plane,
                                 work->pending_flip_obj);
@@@ -3851,9 -3845,11 +3845,11 @@@ static int intel_crtc_wait_for_pending_
  
        if (ret == 0) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               struct intel_flip_work *work;
  
                spin_lock_irq(&dev->event_lock);
-               if (intel_crtc->unpin_work) {
+               work = intel_crtc->flip_work;
+               if (work && !is_mmio_work(work)) {
                        WARN_ONCE(1, "Removing stuck page flip\n");
                        page_flip_completed(intel_crtc);
                }
@@@ -5328,32 -5324,33 +5324,33 @@@ static void intel_update_cdclk(struct d
                         dev_priv->cdclk_freq);
  
        /*
-        * Program the gmbus_freq based on the cdclk frequency.
-        * BSpec erroneously claims we should aim for 4MHz, but
-        * in fact 1MHz is the correct frequency.
+        * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+        * Programmng [sic] note: bit[9:2] should be programmed to the number
+        * of cdclk that generates 4MHz reference clock freq which is used to
+        * generate GMBus clock. This will vary with the cdclk freq.
         */
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-               /*
-                * Program the gmbus_freq based on the cdclk frequency.
-                * BSpec erroneously claims we should aim for 4MHz, but
-                * in fact 1MHz is the correct frequency.
-                */
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-       }
  
        if (dev_priv->max_cdclk_freq == 0)
                intel_update_max_cdclk(dev);
  }
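
To put a number on the gmbus write above (value invented for illustration):
the register ends up holding the cdclk frequency in MHz, since the driver
keeps cdclk_freq in kHz.

	/* cdclk_freq = 320000 kHz -> DIV_ROUND_UP(320000, 1000) = 320 (MHz);
	 * a non-multiple such as 333333 kHz rounds up to 334 rather than
	 * truncating to 333. */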
  
- static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
+ /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+ static int skl_cdclk_decimal(int cdclk)
+ {
+       return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
+ }
+ 
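The new helper centralizes the CDCLK_CTL decimal-field encoding that both
set_cdclk paths need; a standalone sanity check of the round-trip, with
DIV_ROUND_CLOSEST() expanded by hand and test values picked for illustration:

	#include <assert.h>

	static int cdclk_decimal(int cdclk)
	{
		return (cdclk - 1000 + 250) / 500; /* DIV_ROUND_CLOSEST(x, 500) */
	}

	int main(void)
	{
		assert(cdclk_decimal(337500) == 673); /* SKL minimum cdclk */
		assert(673 * 500 + 1000 == 337500);   /* inverse used below */
		assert(cdclk_decimal(144000) == 286); /* BXT minimum cdclk */
		return 0;
	}
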
+ static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
  {
        uint32_t divider;
        uint32_t ratio;
-       uint32_t current_freq;
+       uint32_t current_cdclk;
        int ret;
  
        /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
-       switch (frequency) {
+       switch (cdclk) {
        case 144000:
                divider = BXT_CDCLK_CD2X_DIV_SEL_4;
                ratio = BXT_DE_PLL_RATIO(60);
                divider = 0;
                break;
        default:
-               DRM_ERROR("unsupported CDCLK freq %d", frequency);
+               DRM_ERROR("unsupported CDCLK freq %d", cdclk);
  
                return;
        }
  
        if (ret) {
                DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
-                         ret, frequency);
+                         ret, cdclk);
                return;
        }
  
-       current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
+       current_cdclk = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
        /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
-       current_freq = current_freq * 500 + 1000;
+       current_cdclk = current_cdclk * 500 + 1000;
  
        /*
         * DE PLL has to be disabled when
         * - before setting to 624MHz (PLL needs toggling)
         * - before setting to any frequency from 624MHz (PLL needs toggling)
         */
-       if (frequency == 19200 || frequency == 624000 ||
-           current_freq == 624000) {
+       if (cdclk == 19200 || cdclk == 624000 ||
+           current_cdclk == 624000) {
                I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
                /* Timeout 200us */
                if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
                        DRM_ERROR("timout waiting for DE PLL unlock\n");
        }
  
-       if (frequency != 19200) {
+       if (cdclk != 19200) {
                uint32_t val;
  
                val = I915_READ(BXT_DE_PLL_CTL);
                if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
                        DRM_ERROR("timeout waiting for DE PLL lock\n");
  
-               val = I915_READ(CDCLK_CTL);
-               val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
-               val |= divider;
+               val = divider | skl_cdclk_decimal(cdclk);
+               /*
+                * FIXME if only the cd2x divider needs changing, it could be done
+                * without shutting off the pipe (if only one pipe is active).
+                */
+               val |= BXT_CDCLK_CD2X_PIPE_NONE;
                /*
                 * Disable SSA Precharge when CD clock frequency < 500 MHz,
                 * enable otherwise.
                 */
-               val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-               if (frequency >= 500000)
+               if (cdclk >= 500000)
                        val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-               val &= ~CDCLK_FREQ_DECIMAL_MASK;
-               /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
-               val |= (frequency - 1000) / 500;
                I915_WRITE(CDCLK_CTL, val);
        }
  
        mutex_lock(&dev_priv->rps.hw_lock);
        ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-                                     DIV_ROUND_UP(frequency, 25000));
+                                     DIV_ROUND_UP(cdclk, 25000));
        mutex_unlock(&dev_priv->rps.hw_lock);
  
        if (ret) {
                DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
-                         ret, frequency);
+                         ret, cdclk);
                return;
        }
  
@@@ -5545,11 -5540,6 +5540,6 @@@ static const struct skl_cdclk_entry 
        { .freq = 675000, .vco = 8100 },
  };
  
- static unsigned int skl_cdclk_decimal(unsigned int freq)
- {
-       return (freq - 1000) / 500;
- }
- 
  static unsigned int skl_cdclk_get_vco(unsigned int freq)
  {
        unsigned int i;
  }
  
  static void
- skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
+ skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
  {
-       unsigned int min_freq;
+       int min_cdclk;
        u32 val;
  
        /* select the minimum CDCLK before enabling DPLL 0 */
-       val = I915_READ(CDCLK_CTL);
-       val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
-       val |= CDCLK_FREQ_337_308;
-       if (required_vco == 8640)
-               min_freq = 308570;
+       if (vco == 8640)
+               min_cdclk = 308570;
        else
-               min_freq = 337500;
-       val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
+               min_cdclk = 337500;
  
+       val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
        I915_WRITE(CDCLK_CTL, val);
        POSTING_READ(CDCLK_CTL);
  
        val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
                 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
        val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-       if (required_vco == 8640)
+       if (vco == 8640)
                val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
                                            SKL_DPLL0);
        else
                DRM_ERROR("DPLL0 not locked\n");
  }
  
+ static void
+ skl_dpll0_disable(struct drm_i915_private *dev_priv)
+ {
+       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+       if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
+               DRM_ERROR("Couldn't disable DPLL0\n");
+ }
+ 
  static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
  {
        int ret;
@@@ -5642,12 -5635,12 +5635,12 @@@ static bool skl_cdclk_wait_for_pcu_read
        return false;
  }
  
- static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
+ static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
  {
        struct drm_device *dev = dev_priv->dev;
        u32 freq_select, pcu_ack;
  
-       DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
+       DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", cdclk);
  
        if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
                DRM_ERROR("failed to inform PCU about cdclk change\n");
        }
  
        /* set CDCLK_CTL */
-       switch(freq) {
+       switch (cdclk) {
        case 450000:
        case 432000:
                freq_select = CDCLK_FREQ_450_432;
                break;
        }
  
-       I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
+       I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
        POSTING_READ(CDCLK_CTL);
  
        /* inform PCU of the change */
@@@ -5700,21 -5693,18 +5693,18 @@@ void skl_uninit_cdclk(struct drm_i915_p
        if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
                DRM_ERROR("DBuf power disable timeout\n");
  
-       /* disable DPLL0 */
-       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
-       if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
-               DRM_ERROR("Couldn't disable DPLL0\n");
+       skl_dpll0_disable(dev_priv);
  }
  
  void skl_init_cdclk(struct drm_i915_private *dev_priv)
  {
-       unsigned int required_vco;
+       unsigned int vco;
  
        /* DPLL0 not enabled (happens on early BIOS versions) */
        if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
                /* enable DPLL0 */
-               required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
-               skl_dpll0_enable(dev_priv, required_vco);
+               vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
+               skl_dpll0_enable(dev_priv, vco);
        }
  
        /* set CDCLK to the frequency the BIOS chose */
@@@ -5906,21 -5896,19 +5896,19 @@@ static int valleyview_calc_cdclk(struc
                return 200000;
  }
  
- static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
-                             int max_pixclk)
+ static int broxton_calc_cdclk(int max_pixclk)
  {
        /*
         * FIXME:
-        * - remove the guardband, it's not needed on BXT
         * - set 19.2MHz bypass frequency if there are no active pipes
         */
-       if (max_pixclk > 576000*9/10)
+       if (max_pixclk > 576000)
                return 624000;
-       else if (max_pixclk > 384000*9/10)
+       else if (max_pixclk > 384000)
                return 576000;
-       else if (max_pixclk > 288000*9/10)
+       else if (max_pixclk > 288000)
                return 384000;
-       else if (max_pixclk > 144000*9/10)
+       else if (max_pixclk > 144000)
                return 288000;
        else
                return 144000;
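
Dropping the 9/10 guardband moves the selection boundaries; a worked
comparison (pixel clock invented for illustration):

	/* max_pixclk = 550000 kHz:
	 *   old: 550000 > 576000 * 9 / 10 (= 518400) -> picks 624000
	 *   new: 550000 <= 576000                    -> stays at 576000
	 * Per the removed FIXME, BXT needs no margin between pixel clock
	 * and cdclk, so a pixel clock right at a step no longer forces
	 * the next step up. */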
@@@ -5963,9 -5951,6 +5951,6 @@@ static int valleyview_modeset_calc_cdcl
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(state);
  
-       if (max_pixclk < 0)
-               return max_pixclk;
- 
        intel_state->cdclk = intel_state->dev_cdclk =
                valleyview_calc_cdclk(dev_priv, max_pixclk);
  
  
  static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
  {
-       struct drm_device *dev = state->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int max_pixclk = intel_mode_max_pixclk(dev, state);
+       int max_pixclk = ilk_max_pixel_rate(state);
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(state);
  
-       if (max_pixclk < 0)
-               return max_pixclk;
- 
        intel_state->cdclk = intel_state->dev_cdclk =
-               broxton_calc_cdclk(dev_priv, max_pixclk);
+               broxton_calc_cdclk(max_pixclk);
  
        if (!intel_state->active_crtcs)
-               intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+               intel_state->dev_cdclk = broxton_calc_cdclk(0);
  
        return 0;
  }
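
Worth noting in passing: with no active CRTCs every "max_pixclk >" test
fails, so the broxton_calc_cdclk(0) call above resolves to the 144000 kHz
floor and dev_cdclk idles at the bottom step.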
@@@ -6252,7 -6232,7 +6232,7 @@@ static void intel_crtc_disable_noatomic
                return;
  
        if (to_intel_plane_state(crtc->primary->state)->visible) {
-               WARN_ON(intel_crtc->unpin_work);
+               WARN_ON(intel_crtc->flip_work);
  
                intel_pre_disable_primary_noatomic(crtc);
  
@@@ -7063,7 -7043,7 +7043,7 @@@ static uint32_t i9xx_dpll_compute_fp(st
  
  static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
                                     struct intel_crtc_state *crtc_state,
-                                    intel_clock_t *reduced_clock)
+                                    struct dpll *reduced_clock)
  {
        struct drm_device *dev = crtc->base.dev;
        u32 fp, fp2 = 0;
@@@ -7487,7 -7467,7 +7467,7 @@@ void vlv_force_pll_off(struct drm_devic
  
  static void i9xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
-                             intel_clock_t *reduced_clock)
+                             struct dpll *reduced_clock)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
  
  static void i8xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
-                             intel_clock_t *reduced_clock)
+                             struct dpll *reduced_clock)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -7817,7 -7797,7 +7797,7 @@@ static int i8xx_crtc_compute_clock(stru
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const intel_limit_t *limit;
+       const struct intel_limit *limit;
        int refclk = 48000;
  
        memset(&crtc_state->dpll_hw_state, 0,
@@@ -7853,7 -7833,7 +7833,7 @@@ static int g4x_crtc_compute_clock(struc
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const intel_limit_t *limit;
+       const struct intel_limit *limit;
        int refclk = 96000;
  
        memset(&crtc_state->dpll_hw_state, 0,
@@@ -7896,7 -7876,7 +7876,7 @@@ static int pnv_crtc_compute_clock(struc
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const intel_limit_t *limit;
+       const struct intel_limit *limit;
        int refclk = 96000;
  
        memset(&crtc_state->dpll_hw_state, 0,
@@@ -7930,7 -7910,7 +7910,7 @@@ static int i9xx_crtc_compute_clock(stru
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const intel_limit_t *limit;
+       const struct intel_limit *limit;
        int refclk = 96000;
  
        memset(&crtc_state->dpll_hw_state, 0,
@@@ -7963,7 -7943,7 +7943,7 @@@ static int chv_crtc_compute_clock(struc
                                  struct intel_crtc_state *crtc_state)
  {
        int refclk = 100000;
-       const intel_limit_t *limit = &intel_limits_chv;
+       const struct intel_limit *limit = &intel_limits_chv;
  
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
@@@ -7984,7 -7964,7 +7964,7 @@@ static int vlv_crtc_compute_clock(struc
                                  struct intel_crtc_state *crtc_state)
  {
        int refclk = 100000;
-       const intel_limit_t *limit = &intel_limits_vlv;
+       const struct intel_limit *limit = &intel_limits_vlv;
  
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
@@@ -8034,7 -8014,7 +8014,7 @@@ static void vlv_crtc_clock_get(struct i
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = pipe_config->cpu_transcoder;
-       intel_clock_t clock;
+       struct dpll clock;
        u32 mdiv;
        int refclk = 100000;
  
@@@ -8131,7 -8111,7 +8111,7 @@@ static void chv_crtc_clock_get(struct i
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
-       intel_clock_t clock;
+       struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;
  
@@@ -8794,7 -8774,7 +8774,7 @@@ static bool ironlake_needs_fb_cb_tune(s
  
  static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                                  struct intel_crtc_state *crtc_state,
-                                 intel_clock_t *reduced_clock)
+                                 struct dpll *reduced_clock)
  {
        struct drm_crtc *crtc = &intel_crtc->base;
        struct drm_device *dev = crtc->dev;
@@@ -8902,10 -8882,10 +8882,10 @@@ static int ironlake_crtc_compute_clock(
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       intel_clock_t reduced_clock;
+       struct dpll reduced_clock;
        bool has_reduced_clock = false;
        struct intel_shared_dpll *pll;
-       const intel_limit_t *limit;
+       const struct intel_limit *limit;
        int refclk = 120000;
  
        memset(&crtc_state->dpll_hw_state, 0,
@@@ -9300,6 -9280,10 +9280,10 @@@ static bool ironlake_get_pipe_config(st
                ironlake_get_fdi_m_n_config(crtc, pipe_config);
  
                if (HAS_PCH_IBX(dev_priv)) {
+                       /*
+                        * The pipe->pch transcoder and pch transcoder->pll
+                        * mapping is fixed.
+                        */
                        pll_id = (enum intel_dpll_id) crtc->pipe;
                } else {
                        tmp = I915_READ(PCH_DPLL_SEL);
@@@ -9687,6 -9671,18 +9671,18 @@@ static void broadwell_set_cdclk(struct 
             cdclk, dev_priv->cdclk_freq);
  }
  
+ static int broadwell_calc_cdclk(int max_pixclk)
+ {
+       if (max_pixclk > 540000)
+               return 675000;
+       else if (max_pixclk > 450000)
+               return 540000;
+       else if (max_pixclk > 337500)
+               return 450000;
+       else
+               return 337500;
+ }
+ 
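As on BXT, pulling the ladder into a helper lets the no-pipes case reuse it:
broadwell_calc_cdclk(0) falls through every branch to 337500 kHz, which is
exactly the hard-coded fallback replaced just below.
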
  static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
  {
        struct drm_i915_private *dev_priv = to_i915(state->dev);
         * FIXME should also account for plane ratio
         * once 64bpp pixel formats are supported.
         */
-       if (max_pixclk > 540000)
-               cdclk = 675000;
-       else if (max_pixclk > 450000)
-               cdclk = 540000;
-       else if (max_pixclk > 337500)
-               cdclk = 450000;
-       else
-               cdclk = 337500;
+       cdclk = broadwell_calc_cdclk(max_pixclk);
  
        if (cdclk > dev_priv->max_cdclk_freq) {
                DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
  
        intel_state->cdclk = intel_state->dev_cdclk = cdclk;
        if (!intel_state->active_crtcs)
-               intel_state->dev_cdclk = 337500;
+               intel_state->dev_cdclk = broadwell_calc_cdclk(0);
  
        return 0;
  }
@@@ -9850,6 -9839,10 +9839,10 @@@ static bool hsw_get_transcoder_state(st
        enum intel_display_power_domain power_domain;
        u32 tmp;
  
+       /*
+        * The pipe->transcoder mapping is fixed with the exception of the eDP
+        * transcoder handled below.
+        */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  
        /*
@@@ -10317,10 -10310,10 +10310,10 @@@ intel_framebuffer_create_for_mode(struc
        struct drm_i915_gem_object *obj;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  
-       obj = i915_gem_alloc_object(dev,
+       obj = i915_gem_object_create(dev,
                                    intel_framebuffer_size_for_mode(mode, bpp));
-       if (obj == NULL)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
  
        mode_cmd.width = mode->hdisplay;
        mode_cmd.height = mode->vdisplay;
@@@ -10632,7 -10625,7 +10625,7 @@@ static void i9xx_crtc_clock_get(struct 
        int pipe = pipe_config->cpu_transcoder;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
-       intel_clock_t clock;
+       struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);
  
@@@ -10806,31 -10799,27 +10799,27 @@@ struct drm_display_mode *intel_crtc_mod
        return mode;
  }
  
- void intel_mark_busy(struct drm_device *dev)
+ void intel_mark_busy(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        if (dev_priv->mm.busy)
                return;
  
        intel_runtime_pm_get(dev_priv);
        i915_update_gfx_val(dev_priv);
-       if (INTEL_INFO(dev)->gen >= 6)
+       if (INTEL_GEN(dev_priv) >= 6)
                gen6_rps_busy(dev_priv);
        dev_priv->mm.busy = true;
  }
  
- void intel_mark_idle(struct drm_device *dev)
+ void intel_mark_idle(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        if (!dev_priv->mm.busy)
                return;
  
        dev_priv->mm.busy = false;
  
-       if (INTEL_INFO(dev)->gen >= 6)
-               gen6_rps_idle(dev->dev_private);
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_idle(dev_priv);
  
        intel_runtime_pm_put(dev_priv);
  }
@@@ -10839,15 -10828,16 +10828,16 @@@ static void intel_crtc_destroy(struct d
  {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       struct intel_unpin_work *work;
+       struct intel_flip_work *work;
  
        spin_lock_irq(&dev->event_lock);
-       work = intel_crtc->unpin_work;
-       intel_crtc->unpin_work = NULL;
+       work = intel_crtc->flip_work;
+       intel_crtc->flip_work = NULL;
        spin_unlock_irq(&dev->event_lock);
  
        if (work) {
-               cancel_work_sync(&work->work);
+               cancel_work_sync(&work->mmio_work);
+               cancel_work_sync(&work->unpin_work);
                kfree(work);
        }
  
  
  static void intel_unpin_work_fn(struct work_struct *__work)
  {
-       struct intel_unpin_work *work =
-               container_of(__work, struct intel_unpin_work, work);
+       struct intel_flip_work *work =
+               container_of(__work, struct intel_flip_work, unpin_work);
        struct intel_crtc *crtc = to_intel_crtc(work->crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_plane *primary = crtc->base.primary;
  
+       if (is_mmio_work(work))
+               flush_work(&work->mmio_work);
        mutex_lock(&dev->struct_mutex);
        intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
        drm_gem_object_unreference(&work->pending_flip_obj->base);
        kfree(work);
  }
  
- static void do_intel_finish_page_flip(struct drm_device *dev,
-                                     struct drm_crtc *crtc)
- {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_unpin_work *work;
-       unsigned long flags;
- 
-       /* Ignore early vblank irqs */
-       if (intel_crtc == NULL)
-               return;
- 
-       /*
-        * This is called both by irq handlers and the reset code (to complete
-        * lost pageflips) so needs the full irqsave spinlocks.
-        */
-       spin_lock_irqsave(&dev->event_lock, flags);
-       work = intel_crtc->unpin_work;
- 
-       /* Ensure we don't miss a work->pending update ... */
-       smp_rmb();
- 
-       if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-               return;
-       }
- 
-       page_flip_completed(intel_crtc);
- 
-       spin_unlock_irqrestore(&dev->event_lock, flags);
- }
- 
- void intel_finish_page_flip(struct drm_device *dev, int pipe)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- 
-       do_intel_finish_page_flip(dev, crtc);
- }
- 
- void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
- 
-       do_intel_finish_page_flip(dev, crtc);
- }
  /* Is 'a' after or equal to 'b'? */
  static bool g4x_flip_count_after_eq(u32 a, u32 b)
  {
        return !((a - b) & 0x80000000);
  }
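
The comparison is wraparound-safe modular arithmetic on the 32-bit flip
counter; two illustrative values:

	/* a = 2, b = 0xfffffffe: a - b == 4 (mod 2^32), sign bit clear,
	 * so 'a' counts as after 'b' despite being numerically smaller.
	 * Reversed, b - a == 0xfffffffc has the sign bit set: not after. */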
  
- static bool page_flip_finished(struct intel_crtc *crtc)
+ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
+                                  struct intel_flip_work *work)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
         * anyway, we don't really care.
         */
        return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
-               crtc->unpin_work->gtt_offset &&
+               crtc->flip_work->gtt_offset &&
                g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
-                                   crtc->unpin_work->flip_count);
+                                   crtc->flip_work->flip_count);
  }
  
- void intel_prepare_page_flip(struct drm_device *dev, int plane)
+ static bool
+ __pageflip_finished_mmio(struct intel_crtc *crtc,
+                              struct intel_flip_work *work)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+       /*
+        * MMIO work completes when vblank is different from
+        * flip_queued_vblank.
+        *
+        * Reset counter value doesn't matter, this is handled by
+        * i915_wait_request finishing early, so no need to handle
+        * reset here.
+        */
+       return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
+ }
+ 
+ static bool pageflip_finished(struct intel_crtc *crtc,
+                             struct intel_flip_work *work)
+ {
+       if (!atomic_read(&work->pending))
+               return false;
+ 
+       smp_rmb();
+ 
+       if (is_mmio_work(work))
+               return __pageflip_finished_mmio(crtc, work);
+       else
+               return __pageflip_finished_cs(crtc, work);
+ }
+ 
+ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
+ {
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_flip_work *work;
        unsigned long flags;
  
+       /* Ignore early vblank irqs */
+       if (!crtc)
+               return;
+ 
+       /*
+        * This is called both by irq handlers and the reset code (to complete
+        * lost pageflips) so needs the full irqsave spinlocks.
+        */
+       spin_lock_irqsave(&dev->event_lock, flags);
+       work = intel_crtc->flip_work;
+       if (work != NULL &&
+           !is_mmio_work(work) &&
+           pageflip_finished(intel_crtc, work))
+               page_flip_completed(intel_crtc);
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+ 
+ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
+ {
+       struct drm_device *dev = dev_priv->dev;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_flip_work *work;
+       unsigned long flags;
+ 
+       /* Ignore early vblank irqs */
+       if (!crtc)
+               return;
  
        /*
         * This is called both by irq handlers and the reset code (to complete
         * lost pageflips) so needs the full irqsave spinlocks.
-        *
-        * NB: An MMIO update of the plane base pointer will also
-        * generate a page-flip completion irq, i.e. every modeset
-        * is also accompanied by a spurious intel_prepare_page_flip().
         */
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
-               atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+       work = intel_crtc->flip_work;
+       if (work != NULL &&
+           is_mmio_work(work) &&
+           pageflip_finished(intel_crtc, work))
+               page_flip_completed(intel_crtc);
        spin_unlock_irqrestore(&dev->event_lock, flags);
  }
  
- static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
+ static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
+                                              struct intel_flip_work *work)
  {
+       work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
+ 
        /* Ensure that the work item is consistent when activating it ... */
-       smp_wmb();
-       atomic_set(&work->pending, INTEL_FLIP_PENDING);
-       /* and that it is marked active as soon as the irq could fire. */
-       smp_wmb();
+       smp_mb__before_atomic();
+       atomic_set(&work->pending, 1);
  }
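
The smp_mb__before_atomic() pairs with the smp_rmb() on the reader side in
pageflip_finished() above, so a vblank irq that observes pending == 1 is
guaranteed to see the flip_queued_vblank store too. Schematically:

	/*
	 * writer (queueing the flip)         reader (vblank irq)
	 *   work->flip_queued_vblank = v;      if (!atomic_read(&work->pending))
	 *   smp_mb__before_atomic();                   return false;
	 *   atomic_set(&work->pending, 1);     smp_rmb();
	 *                                      ... reads flip_queued_vblank == v
	 */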
  
  static int intel_gen2_queue_flip(struct drm_device *dev,
        intel_ring_emit(engine, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(engine, fb->pitches[0]);
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
        intel_ring_emit(engine, 0); /* aux display base address, unused */
  
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
  }
  
@@@ -11073,10 -11082,9 +11082,9 @@@ static int intel_gen3_queue_flip(struc
        intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(engine, fb->pitches[0]);
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
        intel_ring_emit(engine, MI_NOOP);
  
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
  }
  
@@@ -11104,7 -11112,7 +11112,7 @@@ static int intel_gen4_queue_flip(struc
        intel_ring_emit(engine, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(engine, fb->pitches[0]);
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
                        obj->tiling_mode);
  
        /* XXX Enabling the panel-fitter across page-flip is so far
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(engine, pf | pipesrc);
  
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
  }
  
@@@ -11139,7 -11146,7 +11146,7 @@@ static int intel_gen6_queue_flip(struc
        intel_ring_emit(engine, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
  
        /* Contrary to the suggestions in the documentation,
         * "Enable Panel Fitter" does not seem to be required when page
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(engine, pf | pipesrc);
  
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
  }
  
@@@ -11243,10 -11249,9 +11249,9 @@@ static int intel_gen7_queue_flip(struc
  
        intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
        intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
        intel_ring_emit(engine, (MI_NOOP));
  
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
  }
  
@@@ -11264,7 -11269,7 +11269,7 @@@ static bool use_mmio_flip(struct intel_
        if (engine == NULL)
                return true;
  
-       if (INTEL_INFO(engine->dev)->gen < 5)
+       if (INTEL_GEN(engine->i915) < 5)
                return false;
  
        if (i915.use_mmio_flip < 0)
  
  static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
                             unsigned int rotation,
-                            struct intel_unpin_work *work)
+                            struct intel_flip_work *work)
  {
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
  }
  
  static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
-                            struct intel_unpin_work *work)
+                            struct intel_flip_work *work)
  {
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        POSTING_READ(DSPSURF(intel_crtc->plane));
  }
  
- /*
-  * XXX: This is the temporary way to update the plane registers until we get
-  * around to using the usual plane update functions for MMIO flips
-  */
- static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
- {
-       struct intel_crtc *crtc = mmio_flip->crtc;
-       struct intel_unpin_work *work;
- 
-       spin_lock_irq(&crtc->base.dev->event_lock);
-       work = crtc->unpin_work;
-       spin_unlock_irq(&crtc->base.dev->event_lock);
-       if (work == NULL)
-               return;
- 
-       intel_mark_page_flip_active(work);
- 
-       intel_pipe_update_start(crtc);
- 
-       if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
-               skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
-       else
-               /* use_mmio_flip() retricts MMIO flips to ilk+ */
-               ilk_do_mmio_flip(crtc, work);
- 
-       intel_pipe_update_end(crtc);
- }
- 
- static void intel_mmio_flip_work_func(struct work_struct *work)
+ static void intel_mmio_flip_work_func(struct work_struct *w)
  {
-       struct intel_mmio_flip *mmio_flip =
-               container_of(work, struct intel_mmio_flip, work);
+       struct intel_flip_work *work =
+               container_of(w, struct intel_flip_work, mmio_work);
+       struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_framebuffer *intel_fb =
-               to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+               to_intel_framebuffer(crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
  
-       if (mmio_flip->req) {
-               WARN_ON(__i915_wait_request(mmio_flip->req,
+       if (work->flip_queued_req)
+               WARN_ON(__i915_wait_request(work->flip_queued_req,
                                            false, NULL,
-                                           &mmio_flip->i915->rps.mmioflips));
-               i915_gem_request_unreference__unlocked(mmio_flip->req);
-       }
+                                           &dev_priv->rps.mmioflips));
  
        /* For framebuffer backed by dmabuf, wait for fence */
        if (obj->base.dma_buf)
                                                            false, false,
                                                            MAX_SCHEDULE_TIMEOUT) < 0);
  
-       intel_do_mmio_flip(mmio_flip);
-       kfree(mmio_flip);
- }
- 
- static int intel_queue_mmio_flip(struct drm_device *dev,
-                                struct drm_crtc *crtc,
-                                struct drm_i915_gem_object *obj)
- {
-       struct intel_mmio_flip *mmio_flip;
- 
-       mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
-       if (mmio_flip == NULL)
-               return -ENOMEM;
- 
-       mmio_flip->i915 = to_i915(dev);
-       mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
-       mmio_flip->crtc = to_intel_crtc(crtc);
-       mmio_flip->rotation = crtc->primary->state->rotation;
+       intel_pipe_update_start(crtc);
  
-       INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
-       schedule_work(&mmio_flip->work);
+       if (INTEL_GEN(dev_priv) >= 9)
+               skl_do_mmio_flip(crtc, work->rotation, work);
+       else
+               /* use_mmio_flip() restricts MMIO flips to ilk+ */
+               ilk_do_mmio_flip(crtc, work);
  
-       return 0;
+       intel_pipe_update_end(crtc, work);
  }
  
  static int intel_default_queue_flip(struct drm_device *dev,
        return -ENODEV;
  }
  
- static bool __intel_pageflip_stall_check(struct drm_device *dev,
-                                        struct drm_crtc *crtc)
+ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
+                                     struct intel_crtc *intel_crtc,
+                                     struct intel_flip_work *work)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_unpin_work *work = intel_crtc->unpin_work;
-       u32 addr;
- 
-       if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
-               return true;
+       u32 addr, vblank;
  
-       if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
+       if (!atomic_read(&work->pending))
                return false;
  
-       if (!work->enable_stall_check)
-               return false;
+       smp_rmb();
  
+       vblank = intel_crtc_get_vblank_counter(intel_crtc);
        if (work->flip_ready_vblank == 0) {
                if (work->flip_queued_req &&
                    !i915_gem_request_completed(work->flip_queued_req, true))
                        return false;
  
-               work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
+               work->flip_ready_vblank = vblank;
        }
  
-       if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
+       if (vblank - work->flip_ready_vblank < 3)
                return false;
  
        /* Potential stall - if we see that the flip has happened,
         * assume a missed interrupt. */
-       if (INTEL_INFO(dev)->gen >= 4)
+       if (INTEL_GEN(dev_priv) >= 4)
                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
        else
                addr = I915_READ(DSPADDR(intel_crtc->plane));
        return addr == work->gtt_offset;
  }
  
- void intel_check_page_flip(struct drm_device *dev, int pipe)
+ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_unpin_work *work;
+       struct intel_flip_work *work;
  
        WARN_ON(!in_interrupt());
  
                return;
  
        spin_lock(&dev->event_lock);
-       work = intel_crtc->unpin_work;
-       if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
-               WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
-                        work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+       work = intel_crtc->flip_work;
+       if (work != NULL && !is_mmio_work(work) &&
+           __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+               WARN_ONCE(1,
+                         "Kicking stuck page flip: queued at %d, now %d\n",
+                       work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
                page_flip_completed(intel_crtc);
                work = NULL;
        }
-       if (work != NULL &&
-           drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
-               intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
+       if (work != NULL && !is_mmio_work(work) &&
+           intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+               intel_queue_rps_boost_for_request(work->flip_queued_req);
        spin_unlock(&dev->event_lock);
  }
  
@@@ -11522,7 -11484,7 +11484,7 @@@ static int intel_crtc_page_flip(struct 
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *primary = crtc->primary;
        enum pipe pipe = intel_crtc->pipe;
-       struct intel_unpin_work *work;
+       struct intel_flip_work *work;
        struct intel_engine_cs *engine;
        bool mmio_flip;
        struct drm_i915_gem_request *request = NULL;
        work->event = event;
        work->crtc = crtc;
        work->old_fb = old_fb;
-       INIT_WORK(&work->work, intel_unpin_work_fn);
+       INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
  
        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                goto free_work;
  
-       /* We borrow the event spin lock for protecting unpin_work */
+       /* We borrow the event spin lock for protecting flip_work */
        spin_lock_irq(&dev->event_lock);
-       if (intel_crtc->unpin_work) {
+       if (intel_crtc->flip_work) {
                /* Before declaring the flip queue wedged, check if
                 * the hardware completed the operation behind our backs.
                 */
-               if (__intel_pageflip_stall_check(dev, crtc)) {
+               if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
                        DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
                        page_flip_completed(intel_crtc);
                } else {
                        return -EBUSY;
                }
        }
-       intel_crtc->unpin_work = work;
+       intel_crtc->flip_work = work;
        spin_unlock_irq(&dev->event_lock);
  
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
         */
        if (!mmio_flip) {
                ret = i915_gem_object_sync(obj, engine, &request);
+               if (!ret && !request) {
+                       request = i915_gem_request_alloc(engine, NULL);
+                       ret = PTR_ERR_OR_ZERO(request);
+               }
                if (ret)
                        goto cleanup_pending;
        }
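
PTR_ERR_OR_ZERO() above is the stock kernel shorthand for

	ret = IS_ERR(request) ? PTR_ERR(request) : 0;

and hoisting the allocation up here lets a sync failure and an allocation
failure share the cleanup_pending path, instead of the later cleanup_unpin
that the removed open-coded version below had to use.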
        work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
                                                  obj, 0);
        work->gtt_offset += intel_crtc->dspaddr_offset;
+       work->rotation = crtc->primary->state->rotation;
  
        if (mmio_flip) {
-               ret = intel_queue_mmio_flip(dev, crtc, obj);
-               if (ret)
-                       goto cleanup_unpin;
+               INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
  
                i915_gem_request_assign(&work->flip_queued_req,
                                        obj->last_write_req);
-       } else {
-               if (!request) {
-                       request = i915_gem_request_alloc(engine, NULL);
-                       if (IS_ERR(request)) {
-                               ret = PTR_ERR(request);
-                               goto cleanup_unpin;
-                       }
-               }
  
+               schedule_work(&work->mmio_work);
+       } else {
+               i915_gem_request_assign(&work->flip_queued_req, request);
                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
                                                   page_flip_flags);
                if (ret)
                        goto cleanup_unpin;
  
-               i915_gem_request_assign(&work->flip_queued_req, request);
-       }
+               intel_mark_page_flip_active(intel_crtc, work);
  
-       if (request)
                i915_add_request_no_flush(request);
+       }
  
-       work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
-       work->enable_stall_check = true;
-       i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
+       i915_gem_track_fb(intel_fb_obj(old_fb), obj,
                          to_intel_plane(primary)->frontbuffer_bit);
        mutex_unlock(&dev->struct_mutex);
  
@@@ -11706,7 -11663,7 +11663,7 @@@ cleanup
        drm_framebuffer_unreference(work->old_fb);
  
        spin_lock_irq(&dev->event_lock);
-       intel_crtc->unpin_work = NULL;
+       intel_crtc->flip_work = NULL;
        spin_unlock_irq(&dev->event_lock);
  
        drm_crtc_vblank_put(crtc);
@@@ -11834,6 -11791,11 +11791,11 @@@ int intel_plane_atomic_calc_changes(str
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
+        *
+        * FIXME this is wrong for watermarks. Watermarks should also
+        * be computed as if the pipe would be active. Perhaps move
+        * per-plane wm computation to the .check_plane() hook, and
+        * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled)
                to_intel_plane_state(plane_state)->visible = visible = false;
@@@ -12007,7 -11969,7 +11969,7 @@@ static int intel_crtc_atomic_check(stru
                }
        } else if (dev_priv->display.compute_intermediate_wm) {
                if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
-                       pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
+                       pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
        }
  
        if (INTEL_INFO(dev)->gen >= 9) {
@@@ -12926,7 -12888,7 +12888,7 @@@ verify_crtc_state(struct drm_crtc *crtc
        bool active;
  
        old_state = old_crtc_state->state;
 -      __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
 +      __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
        pipe_config = to_intel_crtc_state(old_crtc_state);
        memset(pipe_config, 0, sizeof(*pipe_config));
        pipe_config->base.crtc = crtc;
@@@ -13280,6 -13242,9 +13242,9 @@@ static int intel_modeset_checks(struct 
                        intel_state->active_crtcs |= 1 << i;
                else
                        intel_state->active_crtcs &= ~(1 << i);
+               if (crtc_state->active != crtc->state->active)
+                       intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
        }
  
        /*
   * phase.  The code here should be run after the per-crtc and per-plane 'check'
   * handlers to ensure that all derived state has been updated.
   */
- static void calc_watermark_data(struct drm_atomic_state *state)
+ static int calc_watermark_data(struct drm_atomic_state *state)
  {
        struct drm_device *dev = state->dev;
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *cstate;
-       struct drm_plane *plane;
-       struct drm_plane_state *pstate;
-       /*
-        * Calculate watermark configuration details now that derived
-        * plane/crtc state is all properly updated.
-        */
-       drm_for_each_crtc(crtc, dev) {
-               cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
-                       crtc->state;
-               if (cstate->active)
-                       intel_state->wm_config.num_pipes_active++;
-       }
-       drm_for_each_legacy_plane(plane, dev) {
-               pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
-                       plane->state;
+       struct drm_i915_private *dev_priv = to_i915(dev);
  
-               if (!to_intel_plane_state(pstate)->visible)
-                       continue;
+       /* Is there platform-specific watermark information to calculate? */
+       if (dev_priv->display.compute_global_watermarks)
+               return dev_priv->display.compute_global_watermarks(state);
  
-               intel_state->wm_config.sprites_enabled = true;
-               if (pstate->crtc_w != pstate->src_w >> 16 ||
-                   pstate->crtc_h != pstate->src_h >> 16)
-                       intel_state->wm_config.sprites_scaled = true;
-       }
+       return 0;
  }
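
All the open-coded pipe/plane accounting is gone; watermark derivation is
now a per-platform hook. A minimal sketch of how a platform could wire
itself up (the hook name comes from the diff, the skl_* body and the init
helper are illustrative only, not the actual implementation):

	/* Illustrative: shape of a platform's global watermark hook. */
	static int skl_compute_wm(struct drm_atomic_state *state)
	{
		/* derive DDB allocations and wm levels from the atomic state */
		return 0;
	}

	static void example_init_wm_hooks(struct drm_i915_private *dev_priv)
	{
		/* Platforms that leave the hook NULL skip global wm work. */
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	}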
  
  /**
@@@ -13377,14 -13320,13 +13320,13 @@@ static int intel_atomic_check(struct dr
                if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
                        crtc_state->mode_changed = true;
  
-               if (!crtc_state->enable) {
-                       if (needs_modeset(crtc_state))
-                               any_ms = true;
+               if (!needs_modeset(crtc_state))
                        continue;
-               }
  
-               if (!needs_modeset(crtc_state))
+               if (!crtc_state->enable) {
+                       any_ms = true;
                        continue;
+               }
  
                /* FIXME: For only active_changed we shouldn't need to do any
                 * state recomputation at all. */
                        return ret;
  
                ret = intel_modeset_pipe_config(crtc, pipe_config);
-               if (ret)
+               if (ret) {
+                       intel_dump_pipe_config(to_intel_crtc(crtc),
+                                              pipe_config, "[failed]");
                        return ret;
+               }
  
                if (i915.fastboot &&
                    intel_pipe_config_compare(dev,
                        to_intel_crtc_state(crtc_state)->update_pipe = true;
                }
  
-               if (needs_modeset(crtc_state)) {
+               if (needs_modeset(crtc_state))
                        any_ms = true;
  
-                       ret = drm_atomic_add_affected_planes(state, crtc);
-                       if (ret)
-                               return ret;
-               }
+               ret = drm_atomic_add_affected_planes(state, crtc);
+               if (ret)
+                       return ret;
  
                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
                                       needs_modeset(crtc_state) ?
                return ret;
  
        intel_fbc_choose_crtc(dev_priv, state);
-       calc_watermark_data(state);
-       return 0;
+       return calc_watermark_data(state);
  }
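
Every branch above funnels through needs_modeset(); at the time of this
merge that helper is a thin wrapper, equivalent to:

	static bool needs_modeset(struct drm_crtc_state *state)
	{
		/* mode, active state or the set of connectors changed */
		return drm_atomic_crtc_needs_modeset(state);
	}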
  
  static int intel_atomic_prepare_commit(struct drm_device *dev,
        return ret;
  }
  
+ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+ {
+       struct drm_device *dev = crtc->base.dev;
+ 
+       if (!dev->max_vblank_count)
+               return drm_accurate_vblank_count(&crtc->base);
+ 
+       return dev->driver->get_vblank_counter(dev, crtc->pipe);
+ }
+ 
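
When a platform has no hardware frame counter (dev->max_vblank_count == 0),
the new helper falls back to drm_accurate_vblank_count(), which returns the
software-tracked count. A hypothetical caller, for illustration only:

	/* Illustrative only: detect whether a vblank elapsed across some work. */
	u32 start = intel_crtc_get_vblank_counter(intel_crtc);

	/* ... queue work expected to finish within the frame ... */

	if (intel_crtc_get_vblank_counter(intel_crtc) == start)
		DRM_DEBUG_KMS("still within the same frame\n");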
  static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
                                          struct drm_i915_private *dev_priv,
                                          unsigned crtc_mask)
@@@ -13597,7 -13549,8 +13549,8 @@@ static int intel_atomic_commit(struct d
        }
  
        drm_atomic_helper_swap_state(dev, state);
-       dev_priv->wm.config = intel_state->wm_config;
+       dev_priv->wm.distrust_bios_wm = false;
+       dev_priv->wm.skl_results = intel_state->wm_results;
        intel_shared_dpll_commit(state);
  
        if (intel_state->modeset) {
@@@ -14006,7 -13959,7 +13959,7 @@@ static void intel_finish_crtc_commit(st
  {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
-       intel_pipe_update_end(intel_crtc);
+       intel_pipe_update_end(intel_crtc, NULL);
  }
  
  /**
@@@ -14885,7 -14838,8 +14838,7 @@@ intel_user_framebuffer_create(struct dr
        struct drm_i915_gem_object *obj;
        struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
 -                                              mode_cmd.handles[0]));
 +      obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
        if (&obj->base == NULL)
                return ERR_PTR(-ENOENT);
  
@@@ -15050,12 -15004,13 +15003,13 @@@ void intel_init_display_hooks(struct dr
                dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-               if (IS_BROADWELL(dev_priv)) {
-                       dev_priv->display.modeset_commit_cdclk =
-                               broadwell_modeset_commit_cdclk;
-                       dev_priv->display.modeset_calc_cdclk =
-                               broadwell_modeset_calc_cdclk;
-               }
+       }
+       if (IS_BROADWELL(dev_priv)) {
+               dev_priv->display.modeset_commit_cdclk =
+                       broadwell_modeset_commit_cdclk;
+               dev_priv->display.modeset_calc_cdclk =
+                       broadwell_modeset_calc_cdclk;
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                dev_priv->display.modeset_commit_cdclk =
                        valleyview_modeset_commit_cdclk;
@@@ -15293,7 -15248,7 +15247,7 @@@ void intel_modeset_init_hw(struct drm_d
        dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
  
        intel_init_clock_gating(dev);
-       intel_enable_gt_powersave(dev);
+       intel_enable_gt_powersave(dev_priv);
  }
  
  /*
@@@ -15363,7 -15318,6 +15317,6 @@@ retry
        }
  
        /* Write calculated watermark values back */
-       to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
        for_each_crtc_in_state(state, crtc, cstate, i) {
                struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
  
@@@ -15461,7 -15415,6 +15414,6 @@@ void intel_modeset_init(struct drm_devi
        }
  
        intel_update_czclk(dev_priv);
-       intel_update_rawclk(dev_priv);
        intel_update_cdclk(dev);
  
        intel_shared_dpll_init(dev);
@@@ -15762,7 -15715,7 +15714,7 @@@ static void intel_modeset_readout_hw_st
                struct intel_crtc_state *crtc_state = crtc->config;
                int pixclk = 0;
  
 -              __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
 +              __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
                memset(crtc_state, 0, sizeof(*crtc_state));
                crtc_state->base.crtc = &crtc->base;
  
  
  void intel_modeset_gem_init(struct drm_device *dev)
  {
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        int ret;
  
-       intel_init_gt_powersave(dev);
+       intel_init_gt_powersave(dev_priv);
  
        intel_modeset_init_hw(dev);
  
-       intel_setup_overlay(dev);
+       intel_setup_overlay(dev_priv);
  
        /*
         * Make sure any fbs we allocated at startup are properly
@@@ -16076,7 -16030,7 +16029,7 @@@ void intel_modeset_cleanup(struct drm_d
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_connector *connector;
  
-       intel_disable_gt_powersave(dev);
+       intel_disable_gt_powersave(dev_priv);
  
        intel_backlight_unregister(dev);
  
  
        drm_mode_config_cleanup(dev);
  
-       intel_cleanup_overlay(dev);
+       intel_cleanup_overlay(dev_priv);
  
-       intel_cleanup_gt_powersave(dev);
+       intel_cleanup_gt_powersave(dev_priv);
  
        intel_teardown_gmbus(dev);
  }
@@@ -16204,9 -16158,8 +16157,8 @@@ struct intel_display_error_state 
  };
  
  struct intel_display_error_state *
- intel_display_capture_error_state(struct drm_device *dev)
+ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_display_error_state *error;
        int transcoders[] = {
                TRANSCODER_A,
        };
        int i;
  
-       if (INTEL_INFO(dev)->num_pipes == 0)
+       if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return NULL;
  
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;
  
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
  
        for_each_pipe(dev_priv, i) {
  
                error->plane[i].control = I915_READ(DSPCNTR(i));
                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
-               if (INTEL_INFO(dev)->gen <= 3) {
+               if (INTEL_GEN(dev_priv) <= 3) {
                        error->plane[i].size = I915_READ(DSPSIZE(i));
                        error->plane[i].pos = I915_READ(DSPPOS(i));
                }
-               if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+               if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
                        error->plane[i].addr = I915_READ(DSPADDR(i));
-               if (INTEL_INFO(dev)->gen >= 4) {
+               if (INTEL_GEN(dev_priv) >= 4) {
                        error->plane[i].surface = I915_READ(DSPSURF(i));
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }
  
                error->pipe[i].source = I915_READ(PIPESRC(i));
  
-               if (HAS_GMCH_DISPLAY(dev))
+               if (HAS_GMCH_DISPLAY(dev_priv))
                        error->pipe[i].stat = I915_READ(PIPESTAT(i));
        }
  
        /* Note: this does not include DSI transcoders. */
-       error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+       error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
        if (HAS_DDI(dev_priv))
                error->num_transcoders++; /* Account for eDP. */
  
@@@ -150,10 -150,10 +150,10 @@@ static int intelfb_alloc(struct drm_fb_
        if (size * 2 < ggtt->stolen_usable_size)
                obj = i915_gem_object_create_stolen(dev, size);
        if (obj == NULL)
-               obj = i915_gem_alloc_object(dev, size);
-       if (!obj) {
+               obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj)) {
                DRM_ERROR("failed to allocate framebuffer\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(obj);
                goto out;
        }
  
@@@ -186,9 -186,11 +186,11 @@@ static int intelfb_create(struct drm_fb
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct fb_info *info;
        struct drm_framebuffer *fb;
+       struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
-       int size, ret;
        bool prealloc = false;
+       void *vaddr;
+       int ret;
  
        if (intel_fb &&
            (sizes->fb_width > intel_fb->base.width ||
        }
  
        obj = intel_fb->obj;
-       size = obj->base.size;
  
        mutex_lock(&dev->struct_mutex);
  
        info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
        info->fbops = &intelfb_ops;
  
+       vma = i915_gem_obj_to_ggtt(obj);
+ 
        /* setup aperture base/size for vesafb takeover */
        info->apertures->ranges[0].base = dev->mode_config.fb_base;
        info->apertures->ranges[0].size = ggtt->mappable_end;
  
-       info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
-       info->fix.smem_len = size;
+       info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+       info->fix.smem_len = vma->node.size;
  
-       info->screen_base =
-               ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
-                          size);
-       if (!info->screen_base) {
+       vaddr = i915_vma_pin_iomap(vma);
+       if (IS_ERR(vaddr)) {
                DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
-               ret = -ENOSPC;
+               ret = PTR_ERR(vaddr);
                goto out_destroy_fbi;
        }
-       info->screen_size = size;
+       info->screen_base = vaddr;
+       info->screen_size = vma->node.size;
  
        /* This driver doesn't need a VT switch to restore the mode on resume */
        info->skip_vt_switch = true;
  out_destroy_fbi:
        drm_fb_helper_release_fbi(helper);
  out_unpin:
-       i915_gem_object_ggtt_unpin(obj);
+       intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
  out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
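
The bare ioremap_wc() is replaced by i915_vma_pin_iomap(), which ties the
mapping to the VMA and keeps it pinned until torn down (here through
intel_unpin_fb_obj() in the error and destroy paths). A sketch of the
balanced pairing, assuming the i915_vma_unpin_iomap() counterpart from the
same series:

	/* Sketch: every pin_iomap is balanced by an unpin_iomap. */
	void __iomem *vaddr = i915_vma_pin_iomap(vma);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset_io(vaddr, 0, vma->node.size);	/* e.g. clear the scanout */
	i915_vma_unpin_iomap(vma);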
@@@ -368,12 -370,12 +370,12 @@@ static bool intel_fb_initial_config(str
        uint64_t conn_configured = 0, mask;
        int pass = 0;
  
 -      save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
 +      save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
                               GFP_KERNEL);
        if (!save_enabled)
                return false;
  
 -      memcpy(save_enabled, enabled, dev->mode_config.num_connector);
 +      memcpy(save_enabled, enabled, fb_helper->connector_count);
        mask = (1 << fb_helper->connector_count) - 1;
  retry:
        for (i = 0; i < fb_helper->connector_count; i++) {
        if (fallback) {
  bail:
                DRM_DEBUG_KMS("Not using firmware configuration\n");
 -              memcpy(enabled, save_enabled, dev->mode_config.num_connector);
 +              memcpy(enabled, save_enabled, fb_helper->connector_count);
                kfree(save_enabled);
                return false;
        }
@@@ -551,6 -553,11 +553,11 @@@ static void intel_fbdev_destroy(struct 
  
        if (ifbdev->fb) {
                drm_framebuffer_unregister_private(&ifbdev->fb->base);
+               mutex_lock(&dev->struct_mutex);
+               intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+               mutex_unlock(&dev->struct_mutex);
                drm_framebuffer_remove(&ifbdev->fb->base);
        }
  }
@@@ -168,7 -168,7 +168,7 @@@ struct overlay_registers 
  };
  
  struct intel_overlay {
-       struct drm_device *dev;
+       struct drm_i915_private *i915;
        struct intel_crtc *crtc;
        struct drm_i915_gem_object *vid_bo;
        struct drm_i915_gem_object *old_vid_bo;
  static struct overlay_registers __iomem *
  intel_overlay_map_regs(struct intel_overlay *overlay)
  {
-       struct drm_i915_private *dev_priv = to_i915(overlay->dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct overlay_registers __iomem *regs;
  
-       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_wc(ggtt->mappable,
-                                        i915_gem_obj_ggtt_offset(overlay->reg_bo));
+               regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+                                        overlay->flip_addr,
+                                        PAGE_SIZE);
  
        return regs;
  }
  static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
                                     struct overlay_registers __iomem *regs)
  {
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
                io_mapping_unmap(regs);
  }
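
With the overlay holding its flip_addr, register mapping no longer consults
the reg_bo's GGTT offset. Every map must still be balanced by the unmap of
the same flavour; sketched usage (error handling elided):

	/* Sketch: non-atomic map/unmap pair around a register update. */
	struct overlay_registers __iomem *regs;

	regs = intel_overlay_map_regs(overlay);
	if (regs) {
		iowrite32(overlay->color_key, &regs->DCLRKV);
		intel_overlay_unmap_regs(overlay, regs);
	}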
  
@@@ -232,14 -232,13 +232,13 @@@ static int intel_overlay_do_wait_reques
  /* overlay needs to be disabled in OCMD reg */
  static int intel_overlay_on(struct intel_overlay *overlay)
  {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        struct drm_i915_gem_request *req;
        int ret;
  
        WARN_ON(overlay->active);
-       WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+       WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
  
        req = i915_gem_request_alloc(engine, NULL);
        if (IS_ERR(req))
  static int intel_overlay_continue(struct intel_overlay *overlay,
                                  bool load_polyphase_filter)
  {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        struct drm_i915_gem_request *req;
        u32 flip_addr = overlay->flip_addr;
@@@ -335,8 -333,7 +333,7 @@@ static void intel_overlay_off_tail(stru
  /* overlay needs to be disabled in OCMD reg */
  static int intel_overlay_off(struct intel_overlay *overlay)
  {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        struct drm_i915_gem_request *req;
        u32 flip_addr = overlay->flip_addr;
        intel_ring_emit(engine, flip_addr);
        intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        /* turn overlay off */
-       if (IS_I830(dev)) {
+       if (IS_I830(dev_priv)) {
                /* Workaround: Don't disable the overlay fully, since otherwise
                 * it dies on the next OVERLAY_ON cmd. */
                intel_ring_emit(engine, MI_NOOP);
@@@ -408,12 -405,11 +405,11 @@@ static int intel_overlay_recover_from_i
   */
  static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
  {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        int ret;
  
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       lockdep_assert_held(&dev_priv->dev->struct_mutex);
  
        /* Only wait if there is actually an old frame to release to
         * guarantee forward progress.
@@@ -537,10 -533,10 +533,10 @@@ static int uv_vsubsampling(u32 format
        }
  }
  
- static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+ static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
  {
        u32 mask, shift, ret;
-       if (IS_GEN2(dev)) {
+       if (IS_GEN2(dev_priv)) {
                mask = 0x1f;
                shift = 5;
        } else {
                shift = 6;
        }
        ret = ((offset + width + mask) >> shift) - (offset >> shift);
-       if (!IS_GEN2(dev))
+       if (!IS_GEN2(dev_priv))
                ret <<= 1;
        ret -= 1;
        return ret << 2;
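
calc_swidthsw() returns the SWIDTHSW field for a span: the number of
mask-aligned blocks touched, doubled on everything past gen2, minus one, in
units of four. A worked example for the non-gen2 case (shift 6, so 64-byte
blocks and a 0x3f mask), with offset = 0 and width = 720:

	ret = ((0 + 720 + 0x3f) >> 6) - (0 >> 6);	/* = 12 blocks */
	ret <<= 1;					/* = 24 (!gen2 doubles) */
	ret -= 1;					/* = 23 */
	return 23 << 2;					/* = 92 */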
@@@ -741,12 -737,12 +737,12 @@@ static int intel_overlay_do_put_image(s
        int ret, tmp_width;
        struct overlay_registers __iomem *regs;
        bool scale_changed = false;
-       struct drm_device *dev = overlay->dev;
+       struct drm_i915_private *dev_priv = overlay->i915;
        u32 swidth, swidthsw, sheight, ostride;
        enum pipe pipe = overlay->crtc->pipe;
  
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       lockdep_assert_held(&dev_priv->dev->struct_mutex);
+       WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
  
        ret = intel_overlay_release_old_vid(overlay);
        if (ret != 0)
                        goto out_unpin;
                }
                oconfig = OCONF_CC_OUT_8BIT;
-               if (IS_GEN4(overlay->dev))
+               if (IS_GEN4(dev_priv))
                        oconfig |= OCONF_CSC_MODE_BT709;
                oconfig |= pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
                tmp_width = params->src_w;
  
        swidth = params->src_w;
-       swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+       swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
        sheight = params->src_h;
        iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
        ostride = params->stride_Y;
                int uv_vscale = uv_vsubsampling(params->format);
                u32 tmp_U, tmp_V;
                swidth |= (params->src_w/uv_hscale) << 16;
-               tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+               tmp_U = calc_swidthsw(dev_priv, params->offset_U,
                                      params->src_w/uv_hscale);
-               tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+               tmp_V = calc_swidthsw(dev_priv, params->offset_V,
                                      params->src_w/uv_hscale);
                swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
                sheight |= (params->src_h/uv_vscale) << 16;
        overlay->old_vid_bo = overlay->vid_bo;
        overlay->vid_bo = new_bo;
  
-       intel_frontbuffer_flip(dev,
-                              INTEL_FRONTBUFFER_OVERLAY(pipe));
+       intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
  
        return 0;
  
@@@ -852,12 -847,12 +847,12 @@@ out_unpin
  
  int intel_overlay_switch_off(struct intel_overlay *overlay)
  {
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct overlay_registers __iomem *regs;
-       struct drm_device *dev = overlay->dev;
        int ret;
  
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       lockdep_assert_held(&dev_priv->dev->struct_mutex);
+       WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
  
        ret = intel_overlay_recover_from_interrupt(overlay);
        if (ret != 0)
@@@ -897,15 -892,14 +892,14 @@@ static int check_overlay_possible_on_cr
  
  static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
  {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        u32 pfit_control = I915_READ(PFIT_CONTROL);
        u32 ratio;
  
        /* XXX: This is not the same logic as in the xorg driver, but more in
         * line with the intel documentation for the i965
         */
-       if (INTEL_INFO(dev)->gen >= 4) {
+       if (INTEL_GEN(dev_priv) >= 4) {
                /* on i965 use the PGM reg to read out the autoscaler values */
                ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
        } else {
@@@ -948,7 -942,7 +942,7 @@@ static int check_overlay_scaling(struc
        return 0;
  }
  
- static int check_overlay_src(struct drm_device *dev,
+ static int check_overlay_src(struct drm_i915_private *dev_priv,
                             struct drm_intel_overlay_put_image *rec,
                             struct drm_i915_gem_object *new_bo)
  {
        u32 tmp;
  
        /* check src dimensions */
-       if (IS_845G(dev) || IS_I830(dev)) {
+       if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
                if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
                    rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
                        return -EINVAL;
                return -EINVAL;
  
        /* stride checking */
-       if (IS_I830(dev) || IS_845G(dev))
+       if (IS_I830(dev_priv) || IS_845G(dev_priv))
                stride_mask = 255;
        else
                stride_mask = 63;
  
        if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
                return -EINVAL;
-       if (IS_GEN4(dev) && rec->stride_Y < 512)
+       if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
                return -EINVAL;
  
        tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
   * Return the pipe currently connected to the panel fitter,
   * or -1 if the panel fitter is not present or not in use
   */
- static int intel_panel_fitter_pipe(struct drm_device *dev)
+ static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32  pfit_control;
  
        /* i830 doesn't have a panel fitter */
-       if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+       if (INTEL_GEN(dev_priv) <= 3 &&
+           (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
                return -1;
  
        pfit_control = I915_READ(PFIT_CONTROL);
                return -1;
  
        /* 965 can place panel fitter on either pipe */
-       if (IS_GEN4(dev))
+       if (IS_GEN4(dev_priv))
                return (pfit_control >> 29) & 0x3;
  
        /* older chips can only use pipe 1 */
        return 1;
  }
  
- int intel_overlay_put_image(struct drm_device *dev, void *data,
-                           struct drm_file *file_priv)
+ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv)
  {
        struct drm_intel_overlay_put_image *put_image_rec = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        }
        crtc = to_intel_crtc(drmmode_crtc);
  
 -      new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
 +      new_bo = to_intel_bo(drm_gem_object_lookup(file_priv,
                                                   put_image_rec->bo_handle));
        if (&new_bo->base == NULL) {
                ret = -ENOENT;
  
                /* line too wide, i.e. one-line-mode */
                if (mode->hdisplay > 1024 &&
-                   intel_panel_fitter_pipe(dev) == crtc->pipe) {
+                   intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
                        overlay->pfit_active = true;
                        update_pfit_vscale_ratio(overlay);
                } else
                goto out_unlock;
        }
  
-       ret = check_overlay_src(dev, put_image_rec, new_bo);
+       ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
        if (ret != 0)
                goto out_unlock;
        params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@@@ -1284,8 -1278,8 +1278,8 @@@ static int check_gamma(struct drm_intel
        return 0;
  }
  
- int intel_overlay_attrs(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv)
+ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
  {
        struct drm_intel_overlay_attrs *attrs = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
                attrs->contrast   = overlay->contrast;
                attrs->saturation = overlay->saturation;
  
-               if (!IS_GEN2(dev)) {
+               if (!IS_GEN2(dev_priv)) {
                        attrs->gamma0 = I915_READ(OGAMC0);
                        attrs->gamma1 = I915_READ(OGAMC1);
                        attrs->gamma2 = I915_READ(OGAMC2);
                intel_overlay_unmap_regs(overlay, regs);
  
                if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
-                       if (IS_GEN2(dev))
+                       if (IS_GEN2(dev_priv))
                                goto out_unlock;
  
                        if (overlay->active) {
@@@ -1371,37 -1365,36 +1365,36 @@@ out_unlock
        return ret;
  }
  
- void intel_setup_overlay(struct drm_device *dev)
+ void intel_setup_overlay(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_overlay *overlay;
        struct drm_i915_gem_object *reg_bo;
        struct overlay_registers __iomem *regs;
        int ret;
  
-       if (!HAS_OVERLAY(dev))
+       if (!HAS_OVERLAY(dev_priv))
                return;
  
        overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
        if (!overlay)
                return;
  
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev_priv->dev->struct_mutex);
        if (WARN_ON(dev_priv->overlay))
                goto out_free;
  
-       overlay->dev = dev;
+       overlay->i915 = dev_priv;
  
        reg_bo = NULL;
-       if (!OVERLAY_NEEDS_PHYSICAL(dev))
-               reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
-       if (reg_bo == NULL)
-               reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+       if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
+               reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
        if (reg_bo == NULL)
+               reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
+       if (IS_ERR(reg_bo))
                goto out_free;
        overlay->reg_bo = reg_bo;
  
-       if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+       if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
                ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
                if (ret) {
                        DRM_ERROR("failed to attach phys overlay regs\n");
        intel_overlay_unmap_regs(overlay, regs);
  
        dev_priv->overlay = overlay;
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->dev->struct_mutex);
        DRM_INFO("initialized overlay support\n");
        return;
  
  out_unpin_bo:
-       if (!OVERLAY_NEEDS_PHYSICAL(dev))
+       if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
                i915_gem_object_ggtt_unpin(reg_bo);
  out_free_bo:
        drm_gem_object_unreference(&reg_bo->base);
  out_free:
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->dev->struct_mutex);
        kfree(overlay);
        return;
  }
  
- void intel_cleanup_overlay(struct drm_device *dev)
+ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        if (!dev_priv->overlay)
                return;
  
@@@ -1482,18 -1473,17 +1473,17 @@@ struct intel_overlay_error_state 
  static struct overlay_registers __iomem *
  intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
  {
-       struct drm_i915_private *dev_priv = to_i915(overlay->dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct overlay_registers __iomem *regs;
  
-       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
                /* Cast to make sparse happy, but it's wc memory anyway, so
                 * equivalent to the wc io mapping on X86. */
                regs = (struct overlay_registers __iomem *)
                        overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_atomic_wc(ggtt->mappable,
-                                               i915_gem_obj_ggtt_offset(overlay->reg_bo));
+               regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+                                               overlay->flip_addr);
  
        return regs;
  }
  static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
                                        struct overlay_registers __iomem *regs)
  {
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
                io_mapping_unmap_atomic(regs);
  }
  
  struct intel_overlay_error_state *
- intel_overlay_capture_error_state(struct drm_device *dev)
+ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_overlay *overlay = dev_priv->overlay;
        struct intel_overlay_error_state *error;
        struct overlay_registers __iomem *regs;
  
        error->dovsta = I915_READ(DOVSTA);
        error->isr = I915_READ(ISR);
-       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-               error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
-       else
-               error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+       error->base = overlay->flip_addr;
  
        regs = intel_overlay_map_regs_atomic(overlay);
        if (!regs)
@@@ -1638,12 -1638,6 +1638,12 @@@ static int pwm_setup_backlight(struct i
                return -ENODEV;
        }
  
 +      /*
 +       * FIXME: pwm_apply_args() should be removed when switching to
 +       * the atomic PWM API.
 +       */
 +      pwm_apply_args(panel->backlight.pwm);
 +
        retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
                            CRC_PMIC_PWM_PERIOD_NS);
        if (retval < 0) {
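
The FIXME above is about the legacy pwm_config()/pwm_enable() interface. A
sketch of what the switch to the atomic PWM API could look like, using
pwm_get_state()/pwm_apply_state(); the duty-cycle scaling and the level
source are illustrative, not the driver's actual formula:

	struct pwm_state state;
	u32 level = panel->backlight.level;	/* hypothetical level source */

	pwm_get_state(panel->backlight.pwm, &state);
	state.period = CRC_PMIC_PWM_PERIOD_NS;
	state.duty_cycle = DIV_ROUND_UP_ULL((u64)level * state.period, 100);
	state.enabled = state.duty_cycle != 0;
	retval = pwm_apply_state(panel->backlight.pwm, &state);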
@@@ -1724,6 -1718,14 +1724,14 @@@ intel_panel_init_backlight_funcs(struc
                container_of(panel, struct intel_connector, panel);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
  
+       if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+           intel_dp_aux_init_backlight_funcs(connector) == 0)
+               return;
+       if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
+           intel_dsi_dcs_init_backlight_funcs(connector) == 0)
+               return;
        if (IS_BROXTON(dev_priv)) {
                panel->backlight.setup = bxt_setup_backlight;
                panel->backlight.enable = bxt_enable_backlight;
diff --combined include/drm/drmP.h
@@@ -52,7 -52,6 +52,7 @@@
  #include <linux/poll.h>
  #include <linux/ratelimit.h>
  #include <linux/sched.h>
 +#include <linux/seqlock.h>
  #include <linux/slab.h>
  #include <linux/types.h>
  #include <linux/vmalloc.h>
@@@ -393,6 -392,11 +393,6 @@@ struct drm_master 
        void *driver_priv;
  };
  
 -/* Size of ringbuffer for vblank timestamps. Just double-buffer
 - * in initial implementation.
 - */
 -#define DRM_VBLANKTIME_RBSIZE 2
 -
  /* Flags and return codes for get_vblank_timestamp() driver function. */
  #define DRM_CALLED_FROM_VBLIRQ 1
  #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
@@@ -721,10 -725,10 +721,10 @@@ struct drm_vblank_crtc 
        wait_queue_head_t queue;        /**< VBLANK wait queue */
        struct timer_list disable_timer;                /* delayed disable timer */
  
 -      /* vblank counter, protected by dev->vblank_time_lock for writes */
 -      u32 count;
 -      /* vblank timestamps, protected by dev->vblank_time_lock for writes */
 -      struct timeval time[DRM_VBLANKTIME_RBSIZE];
 +      seqlock_t seqlock;              /* protects vblank count and time */
 +
 +      u32 count;                      /* vblank counter */
 +      struct timeval time;            /* vblank timestamp */
  
        atomic_t refcount;              /* number of users of vblank interrupts per crtc */
        u32 last;                       /* protected by dev->vbl_lock, used */
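
Replacing the two-entry timestamp ring with a seqlock lets readers snapshot
a consistent count/timestamp pair without taking a lock. The canonical
reader pattern over the new fields (illustrative; the in-tree accessors
live in drm_irq.c):

	static u32 sample_vblank(struct drm_vblank_crtc *vblank,
				 struct timeval *tv)
	{
		unsigned int seq;
		u32 count;

		do {
			seq = read_seqbegin(&vblank->seqlock);
			count = vblank->count;
			*tv = vblank->time;
		} while (read_seqretry(&vblank->seqlock, seq));

		return count;
	}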
@@@ -810,6 -814,14 +810,6 @@@ struct drm_device 
        bool irq_enabled;
        int irq;
  
 -      /*
 -       * At load time, disabling the vblank interrupt won't be allowed since
 -       * old clients may not call the modeset ioctl and therefore misbehave.
 -       * Once the modeset ioctl *has* been called though, we can safely
 -       * disable them when unused.
 -       */
 -      bool vblank_disable_allowed;
 -
        /*
         * If true, vblank interrupt will be disabled immediately when the
         * refcount drops to zero, as opposed to via the vblank disable
@@@ -990,6 -1002,7 +990,7 @@@ extern void drm_crtc_vblank_off(struct 
  extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
  extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
  extern void drm_vblank_cleanup(struct drm_device *dev);
+ extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
  extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
  
  extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,