Merge tag 'drm-intel-next-2015-04-23-fixed' of git://anongit.freedesktop.org/drm-intel
author Dave Airlie <airlied@redhat.com>
Fri, 8 May 2015 10:51:06 +0000 (20:51 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 8 May 2015 10:51:06 +0000 (20:51 +1000)
drm-intel-next-2015-04-23:
- dither support for ns2501 dvo (Thomas Richter)
- some polish for the gtt code and fixes to finally enable the cmd parser on hsw
- first pile of bxt stage 1 enabling (too many different people to list ...)
- more psr fixes from Rodrigo
- skl rotation support from Chandra
- more atomic work from Ander and Matt
- pile of cleanups and micro-ops for execlist from Chris
drm-intel-next-2015-04-10:
- cdclk handling cleanup and fixes from Ville
- more prep patches for olr removal from John Harrison
- gmbus pin naming rework from Jani (prep for bxt)
- remove ->new_config from Ander (more atomic conversion work)
- rps (boost) tuning and unification with byt/bsw from Chris
- cmd parser batch pool tuning from Chris
- gen8 dynamic pte allocation (Michel Thierry, based on work from Ben Widawsky)
- execlist tuning (not yet all of it) from Chris
- add drm_plane_from_index (Chandra)
- various small things all over

* tag 'drm-intel-next-2015-04-23-fixed' of git://anongit.freedesktop.org/drm-intel: (204 commits)
  drm/i915/gtt: Allocate va range only if vma is not bound
  drm/i915: Enable cmd parser to do secure batch promotion for aliasing ppgtt
  drm/i915: fix intel_prepare_ddi
  drm/i915: factor out ddi_get_encoder_port
  drm/i915/hdmi: check port in ibx_infoframe_enabled
  drm/i915/hdmi: fix vlv infoframe port check
  drm/i915: Silence compiler warning in dvo
  drm/i915: Update DRIVER_DATE to 20150423
  drm/i915: Enable dithering on NatSemi DVO2501 for Fujitsu S6010
  drm/i915: Move i915_get_ggtt_vma_pages into ggtt_bind_vma
  drm/i915: Don't try to outsmart gcc in i915_gem_gtt.c
  drm/i915: Unduplicate i915_ggtt_unbind/bind_vma
  drm/i915: Move ppgtt_bind/unbind around
  drm/i915: move i915_gem_restore_gtt_mappings around
  drm/i915: Fix up the vma aliasing ppgtt binding
  drm/i915: Remove misleading comment around bind_to_vm
  drm/i915: Don't use atomics for pg_dirty_rings
  drm/i915: Don't look at pg_dirty_rings for aliasing ppgtt
  drm/i915/skl: Support Y tiling in MMIO flips
  drm/i915: Fixup kerneldoc for struct intel_context
  ...

Conflicts:
drivers/gpu/drm/i915/i915_drv.c

17 files changed:
Documentation/DocBook/drm.tmpl
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_uncore.c
include/drm/drm_crtc.h

@@@ -1293,7 -1293,7 +1293,7 @@@ int max_width, max_height;</synopsis
            </para>
            <para>
              If a page flip can be successfully scheduled the driver must set the
 -            <code>drm_crtc-&lt;fb</code> field to the new framebuffer pointed to
 +            <code>drm_crtc-&gt;fb</code> field to the new framebuffer pointed to
              by <code>fb</code>. This is important so that the reference counting
              on framebuffers stays balanced.
            </para>
@@@ -4067,7 -4067,7 +4067,7 @@@ int num_ioctls;</synopsis
          <title>DPIO</title>
  !Pdrivers/gpu/drm/i915/i915_reg.h DPIO
        <table id="dpiox2">
-         <title>Dual channel PHY (VLV/CHV)</title>
+         <title>Dual channel PHY (VLV/CHV/BXT)</title>
          <tgroup cols="8">
            <colspec colname="c0" />
            <colspec colname="c1" />
          </tgroup>
        </table>
        <table id="dpiox1">
-         <title>Single channel PHY (CHV)</title>
+         <title>Single channel PHY (CHV/BXT)</title>
          <tgroup cols="4">
            <colspec colname="c0" />
            <colspec colname="c1" />
@@@ -1288,6 -1288,29 +1288,29 @@@ unsigned int drm_plane_index(struct drm
  }
  EXPORT_SYMBOL(drm_plane_index);
  
+ /**
+  * drm_plane_from_index - find the registered plane at an index
+  * @dev: DRM device
+  * @idx: index of the registered plane to find
+  *
+  * Given a plane index, return the registered plane from the DRM device's
+  * list of planes with the matching index.
+  */
+ struct drm_plane *
+ drm_plane_from_index(struct drm_device *dev, int idx)
+ {
+       struct drm_plane *plane;
+       unsigned int i = 0;
+       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+               if (i == idx)
+                       return plane;
+               i++;
+       }
+       return NULL;
+ }
+ EXPORT_SYMBOL(drm_plane_from_index);
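The new export is the inverse of drm_plane_index(): a linear walk of mode_config.plane_list, counting entries until the index matches. A minimal standalone model of that lookup, using a hypothetical plane struct rather than the real DRM types:

    #include <stddef.h>
    #include <stdio.h>

    struct plane {                       /* stand-in for struct drm_plane */
            int id;
            struct plane *next;
    };

    static struct plane *plane_from_index(struct plane *head, int idx)
    {
            int i = 0;
            struct plane *p;

            for (p = head; p != NULL; p = p->next, i++)
                    if (i == idx)
                            return p;
            return NULL;                 /* no plane registered at idx */
    }

    int main(void)
    {
            struct plane c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
            struct plane *p = plane_from_index(&a, 2);

            printf("plane at index 2: id %d\n", p ? p->id : -1);
            return 0;
    }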
  /**
   * drm_plane_force_disable - Forcibly disable a plane
   * @plane: plane to disable
@@@ -2135,7 -2158,7 +2158,7 @@@ int drm_mode_getconnector(struct drm_de
        connector = drm_connector_find(dev, out_resp->connector_id);
        if (!connector) {
                ret = -ENOENT;
 -              goto out;
 +              goto out_unlock;
        }
  
        for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
  
  out:
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
 +
 +out_unlock:
        mutex_unlock(&dev->mode_config.mutex);
  
        return ret;
@@@ -2484,17 -2505,6 +2507,17 @@@ static int __setplane_internal(struct d
                goto out;
        }
  
 +      /* Give drivers some help against integer overflows */
 +      if (crtc_w > INT_MAX ||
 +          crtc_x > INT_MAX - (int32_t) crtc_w ||
 +          crtc_h > INT_MAX ||
 +          crtc_y > INT_MAX - (int32_t) crtc_h) {
 +              DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
 +                            crtc_w, crtc_h, crtc_x, crtc_y);
 +              return -ERANGE;
 +      }
 +
        fb_width = fb->width << 16;
        fb_height = fb->height << 16;
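The guard moved above rejects coordinates whose sum would overflow a signed int before they reach driver code. The idiom is to test crtc_x > INT_MAX - crtc_w rather than crtc_x + crtc_w, since the addition itself would be the overflow. A standalone demonstration of the same check (coords_valid is a made-up name):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the check above: reject w/x pairs where x + w would
     * exceed INT_MAX (the cast is safe because w <= INT_MAX). */
    static int coords_valid(uint32_t w, int32_t x)
    {
            return w <= INT_MAX && x <= INT_MAX - (int32_t)w;
    }

    int main(void)
    {
            printf("%d\n", coords_valid(100, INT_MAX - 50)); /* 0: overflows */
            printf("%d\n", coords_valid(100, 1000));         /* 1: fine */
            return 0;
    }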
  
@@@ -2579,6 -2589,17 +2602,6 @@@ int drm_mode_setplane(struct drm_devic
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
  
 -      /* Give drivers some help against integer overflows */
 -      if (plane_req->crtc_w > INT_MAX ||
 -          plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
 -          plane_req->crtc_h > INT_MAX ||
 -          plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
 -              DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
 -                            plane_req->crtc_w, plane_req->crtc_h,
 -                            plane_req->crtc_x, plane_req->crtc_y);
 -              return -ERANGE;
 -      }
 -
        /*
         * First, find the plane, crtc, and fb objects.  If not available,
         * we don't bother to call the driver.
@@@ -381,6 -381,18 +381,18 @@@ static const struct intel_device_info i
        IVB_CURSOR_OFFSETS,
  };
  
+ static const struct intel_device_info intel_broxton_info = {
+       .is_preliminary = 1,
+       .gen = 9,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       .num_pipes = 3,
+       .has_ddi = 1,
+       .has_fbc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       IVB_CURSOR_OFFSETS,
+ };
+
  /*
   * Make sure any device matches here are from most specific to most
   * general.  For example, since the Quanta match is based on the subsystem
        INTEL_CHV_IDS(&intel_cherryview_info),  \
        INTEL_SKL_GT1_IDS(&intel_skylake_info), \
        INTEL_SKL_GT2_IDS(&intel_skylake_info), \
-       INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info)      \
+       INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),     \
+       INTEL_BXT_IDS(&intel_broxton_info)
  
  static const struct pci_device_id pciidlist[] = {             /* aka */
        INTEL_PCI_IDS,
@@@ -996,6 -1009,38 +1009,38 @@@ static int hsw_suspend_complete(struct 
        return 0;
  }
  
+ static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
+ {
+       struct drm_device *dev = dev_priv->dev;
+       /* TODO: when DC5 support is added disable DC5 here. */
+       broxton_ddi_phy_uninit(dev);
+       broxton_uninit_cdclk(dev);
+       bxt_enable_dc9(dev_priv);
+       return 0;
+ }
+
+ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
+ {
+       struct drm_device *dev = dev_priv->dev;
+       /* TODO: when CSR FW support is added make sure the FW is loaded */
+       bxt_disable_dc9(dev_priv);
+       /*
+        * TODO: when DC5 support is added enable DC5 here if the CSR FW
+        * is available.
+        */
+       broxton_init_cdclk(dev);
+       broxton_ddi_phy_init(dev);
+       intel_prepare_ddi(dev);
+       return 0;
+ }
+
  /*
   * Save all Gunit registers that may be lost after a D3 and a subsequent
   * S0i[R123] transition. The list of registers needing a save/restore is
@@@ -1038,7 -1083,7 +1083,7 @@@ static void vlv_save_gunit_s0ix_state(s
                s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
  
        s->media_max_req_count  = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
 -      s->gfx_max_req_count    = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
 +      s->gfx_max_req_count    = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
  
        s->render_hwsp          = I915_READ(RENDER_HWS_PGA_GEN7);
        s->ecochk               = I915_READ(GAM_ECOCHK);
        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        s->gu_ctl0              = I915_READ(VLV_GU_CTL0);
        s->gu_ctl1              = I915_READ(VLV_GU_CTL1);
 +      s->pcbr                 = I915_READ(VLV_PCBR);
        s->clock_gate_dis2      = I915_READ(VLV_GUNIT_CLOCK_GATE2);
  
        /*
@@@ -1120,7 -1164,7 +1165,7 @@@ static void vlv_restore_gunit_s0ix_stat
                I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
  
        I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
 -      I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
 +      I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
  
        I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
        I915_WRITE(GAM_ECOCHK,          s->ecochk);
        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        I915_WRITE(VLV_GU_CTL0,                 s->gu_ctl0);
        I915_WRITE(VLV_GU_CTL1,                 s->gu_ctl1);
 +      I915_WRITE(VLV_PCBR,                    s->pcbr);
        I915_WRITE(VLV_GUNIT_CLOCK_GATE2,       s->clock_gate_dis2);
  }
  
@@@ -1195,7 -1238,21 +1240,21 @@@ int vlv_force_gfx_clock(struct drm_i915
        u32 val;
        int err;
  
+       val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
  #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
+       /* Wait for a previous force-off to settle */
+       if (force_on && !IS_CHERRYVIEW(dev_priv->dev)) {
+               /* WARN_ON only for the Valleyview */
+               WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
+               err = wait_for(!COND, 20);
+               if (err) {
+                       DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
+                                 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+                       return err;
+               }
+       }
  
        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
@@@ -1454,6 -1511,9 +1513,9 @@@ static int intel_runtime_resume(struct 
  
        if (IS_GEN6(dev_priv))
                intel_init_pch_refclk(dev);
+       if (IS_BROXTON(dev))
+               ret = bxt_resume_prepare(dev_priv);
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_disable_pc8(dev_priv);
        else if (IS_VALLEYVIEW(dev_priv))
@@@ -1486,7 -1546,9 +1548,9 @@@ static int intel_suspend_complete(struc
        struct drm_device *dev = dev_priv->dev;
        int ret;
  
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+       if (IS_BROXTON(dev))
+               ret = bxt_suspend_complete(dev_priv);
+       else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                ret = hsw_suspend_complete(dev_priv);
        else if (IS_VALLEYVIEW(dev))
                ret = vlv_suspend_complete(dev_priv);
@@@ -56,7 -56,7 +56,7 @@@
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
- #define DRIVER_DATE           "20150327"
+ #define DRIVER_DATE           "20150423"
  
  #undef WARN_ON
  /* Many gcc seem to not see through this and fall over :( */
@@@ -130,7 -130,7 +130,7 @@@ enum transcoder 
   *
   * This value doesn't count the cursor plane.
   */
- #define I915_MAX_PLANES       3
+ #define I915_MAX_PLANES       4
  
  enum plane {
        PLANE_A = 0,
@@@ -251,7 -251,6 +251,6 @@@ enum hpd_pin 
                            &dev->mode_config.connector_list,   \
                            base.head)
  
  #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
        list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
                if ((intel_encoder)->base.crtc == (__crtc))
@@@ -303,6 -302,9 +302,9 @@@ struct intel_dpll_hw_state 
        uint32_t ctrl1;
        /* HDMI only, 0 when used for DP */
        uint32_t cfgcr1, cfgcr2;
+       /* bxt */
+       uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pcsdw12;
  };
  
  struct intel_shared_dpll_config {
@@@ -455,6 -457,7 +457,7 @@@ struct drm_i915_error_state 
                u32 semaphore_seqno[I915_NUM_RINGS - 1];
  
                /* Register state */
+               u32 start;
                u32 tail;
                u32 head;
                u32 ctl;
@@@ -766,7 -769,7 +769,7 @@@ struct i915_ctx_hang_stats 
   *           context).
   * @hang_stats: information about the role of this context in possible GPU
   *            hangs.
-  * @vm: virtual memory space used by this context.
+  * @ppgtt: virtual memory space used by this context.
   * @legacy_hw_ctx: render context backing object and whether it is correctly
   *                initialized (legacy ring submission mechanism only).
   * @link: link in the global list of contexts.
@@@ -880,7 -883,8 +883,8 @@@ struct i915_psr 
        bool active;
        struct delayed_work work;
        unsigned busy_frontbuffer_bits;
-       bool link_standby;
+       bool psr2_support;
+       bool aux_frame_sync;
  };
  
  enum intel_pch {
@@@ -994,7 -998,6 +998,7 @@@ struct vlv_s0ix_state 
        /* Display 2 CZ domain */
        u32 gu_ctl0;
        u32 gu_ctl1;
 +      u32 pcbr;
        u32 clock_gate_dis2;
  };
  
@@@ -1034,11 -1037,16 +1038,16 @@@ struct intel_gen6_power_mgmt 
        u8 rp0_freq;            /* Non-overclocked max frequency. */
        u32 cz_freq;
  
+       u8 up_threshold; /* Current %busy required to upclock */
+       u8 down_threshold; /* Current %busy required to downclock */
        int last_adj;
        enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
  
        bool enabled;
        struct delayed_work delayed_resume_work;
+       struct list_head clients;
+       unsigned boosts;
  
        /* manual wa residency calculations */
        struct intel_rps_ei up_ei, down_ei;
@@@ -1136,11 -1144,6 +1145,6 @@@ struct intel_l3_parity 
        int which_slice;
  };
  
- struct i915_gem_batch_pool {
-       struct drm_device *dev;
-       struct list_head cache_list;
- };
  struct i915_gem_mm {
        /** Memory allocator for GTT stolen memory */
        struct drm_mm stolen;
         */
        struct list_head unbound_list;
  
-       /*
-        * A pool of objects to use as shadow copies of client batch buffers
-        * when the command parser is enabled. Prevents the client from
-        * modifying the batch contents after software parsing.
-        */
-       struct i915_gem_batch_pool batch_pool;
        /** Usable portion of the GTT for GEM */
        unsigned long stolen_base; /* limited to low memory (32-bit) */
  
@@@ -1563,7 -1559,9 +1560,9 @@@ struct i915_virtual_gpu 
  
  struct drm_i915_private {
        struct drm_device *dev;
-       struct kmem_cache *slab;
+       struct kmem_cache *objects;
+       struct kmem_cache *vmas;
+       struct kmem_cache *requests;
  
        const struct intel_device_info info;
  
  
        struct i915_virtual_gpu vgpu;
  
-       struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+       struct intel_gmbus gmbus[GMBUS_NUM_PINS];
  
        /** gmbus_mutex protects against concurrent usage of the single hw gmbus
         * controller on different i2c buses. */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
  
        unsigned int fsb_freq, mem_freq, is_ddr3;
-       unsigned int vlv_cdclk_freq;
+       unsigned int cdclk_freq;
        unsigned int hpll_freq;
  
        /**
  
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
-               int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
-                                 struct intel_engine_cs *ring,
-                                 struct intel_context *ctx,
-                                 struct drm_i915_gem_execbuffer2 *args,
-                                 struct list_head *vmas,
-                                 struct drm_i915_gem_object *batch_obj,
-                                 u64 exec_start, u32 flags);
+               int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
+                                     struct intel_engine_cs *ring,
+                                     struct intel_context *ctx,
+                                     struct drm_i915_gem_execbuffer2 *args,
+                                     struct list_head *vmas,
+                                     struct drm_i915_gem_object *batch_obj,
+                                     u64 exec_start, u32 flags);
                int (*init_rings)(struct drm_device *dev);
                void (*cleanup_ring)(struct intel_engine_cs *ring);
                void (*stop_ring)(struct intel_engine_cs *ring);
        } gt;
  
-       uint32_t request_uniq;
        /*
         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
         * will be rejected. Instead look for a better place.
@@@ -1917,7 -1912,7 +1913,7 @@@ struct drm_i915_gem_object 
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;
  
-       struct list_head batch_pool_list;
+       struct list_head batch_pool_link;
  
        /**
         * This is set if the object is on the active lists (has pending
         * accurate mappable working set.
         */
        unsigned int fault_mappable:1;
-       unsigned int pin_mappable:1;
-       unsigned int pin_display:1;
  
        /*
         * Is the object to be mapped as read-only to the GPU
  
        unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
  
+       unsigned int pin_display;
        struct sg_table *pages;
        int pages_pin_count;
+       struct get_page {
+               struct scatterlist *sg;
+               int last;
+       } get_page;
  
        /* prime dma-buf support */
        void *dma_buf_vmapping;
@@@ -2046,6 -2045,7 +2046,7 @@@ struct drm_i915_gem_request 
        struct kref ref;
  
        /** On Which ring this request was generated */
+       struct drm_i915_private *i915;
        struct intel_engine_cs *ring;
  
        /** GEM sequence number associated with this request. */
        /** process identifier submitting this request */
        struct pid *pid;
  
-       uint32_t uniq;
        /**
         * The ELSP only accepts two elements at a time, so we queue
         * context/tail pairs on a given queue (ring->execlist_queue) until the
  
  };
  
+ int i915_gem_request_alloc(struct intel_engine_cs *ring,
+                          struct intel_context *ctx);
  void i915_gem_request_free(struct kref *req_ref);
  
  static inline uint32_t
@@@ -2143,6 -2143,19 +2144,19 @@@ i915_gem_request_unreference(struct drm
        kref_put(&req->ref, i915_gem_request_free);
  }
  
+ static inline void
+ i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
+ {
+       struct drm_device *dev;
+       if (!req)
+               return;
+       dev = req->ring->dev;
+       if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
+               mutex_unlock(&dev->struct_mutex);
+ }
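The new __unlocked variant relies on the kref_put_mutex() pattern: drop the reference without holding struct_mutex, and only take the lock when this put is the one that frees the request. A rough userspace model of the lock-avoidance idea, with a pthread mutex and C11 atomics standing in for the kernel primitives (kref_put_mutex() itself acquires the mutex before the final decrement; the sketch only shows the fast path):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct object { atomic_int refcount; };

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static void put_unlocked(struct object *obj)
    {
            /* Fast path: not the last reference, no locking at all. */
            if (atomic_fetch_sub(&obj->refcount, 1) != 1)
                    return;

            /* Last reference: teardown must run under the lock. */
            pthread_mutex_lock(&big_lock);
            free(obj);
            pthread_mutex_unlock(&big_lock);
    }

    int main(void)
    {
            struct object *obj = malloc(sizeof(*obj));

            atomic_init(&obj->refcount, 2);
            put_unlocked(obj);           /* count 2 -> 1, lock-free */
            put_unlocked(obj);           /* count 1 -> 0, frees under lock */
            return 0;
    }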
  static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
                                           struct drm_i915_gem_request *src)
  {
@@@ -2168,12 -2181,13 +2182,13 @@@ struct drm_i915_file_private 
        struct {
                spinlock_t lock;
                struct list_head request_list;
-               struct delayed_work idle_work;
        } mm;
        struct idr context_idr;
  
-       atomic_t rps_wait_boost;
-       struct  intel_engine_cs *bsd_ring;
+       struct list_head rps_boost;
+       struct intel_engine_cs *bsd_ring;
+       unsigned rps_boosts;
  };
  
  /*
@@@ -2307,6 -2321,7 +2322,7 @@@ struct drm_i915_cmd_table 
  #define IS_HASWELL(dev)       (INTEL_INFO(dev)->is_haswell)
  #define IS_BROADWELL(dev)     (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
  #define IS_SKYLAKE(dev)       (INTEL_INFO(dev)->is_skylake)
+ #define IS_BROXTON(dev)       (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
  #define IS_MOBILE(dev)                (INTEL_INFO(dev)->is_mobile)
  #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
  #define SKL_REVID_D0          (0x3)
  #define SKL_REVID_E0          (0x4)
  
+ #define BXT_REVID_A0          (0x0)
+ #define BXT_REVID_B0          (0x3)
+ #define BXT_REVID_C0          (0x6)
+
  /*
   * The genX designation typically refers to the render engine, so render
   * capability related checks should use IS_GEN, while display and other checks
@@@ -2520,6 -2539,13 +2540,13 @@@ void intel_uncore_forcewake_get(struct 
                                enum forcewake_domains domains);
  void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                enum forcewake_domains domains);
+ /* Like above but the caller must manage the uncore.lock itself.
+  * Must be used with I915_READ_FW and friends.
+  */
+ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+                                       enum forcewake_domains domains);
+ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+                                       enum forcewake_domains domains);
  void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
  static inline bool intel_vgpu_active(struct drm_device *dev)
  {
@@@ -2614,10 -2640,13 +2641,13 @@@ void i915_init_vm(struct drm_i915_priva
  void i915_gem_free_object(struct drm_gem_object *obj);
  void i915_gem_vma_destroy(struct i915_vma *vma);
  
- #define PIN_MAPPABLE 0x1
- #define PIN_NONBLOCK 0x2
- #define PIN_GLOBAL 0x4
- #define PIN_OFFSET_BIAS 0x8
+ /* Flags used by pin/bind&friends. */
+ #define PIN_MAPPABLE  (1<<0)
+ #define PIN_NONBLOCK  (1<<1)
+ #define PIN_GLOBAL    (1<<2)
+ #define PIN_OFFSET_BIAS       (1<<3)
+ #define PIN_USER      (1<<4)
+ #define PIN_UPDATE    (1<<5)
  #define PIN_OFFSET_MASK (~4095)
  int __must_check
  i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@@ -2641,15 -2670,32 +2671,32 @@@ int i915_gem_obj_prepare_shmem_read(str
                                    int *needs_clflush);
  
  int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
- static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+ static inline int __sg_page_count(struct scatterlist *sg)
  {
-       struct sg_page_iter sg_iter;
+       return sg->length >> PAGE_SHIFT;
+ }
  
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
-               return sg_page_iter_page(&sg_iter);
+ static inline struct page *
+ i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+ {
+       if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
+               return NULL;
  
-       return NULL;
+       if (n < obj->get_page.last) {
+               obj->get_page.sg = obj->pages->sgl;
+               obj->get_page.last = 0;
+       }
+       while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
+               obj->get_page.last += __sg_page_count(obj->get_page.sg++);
+               if (unlikely(sg_is_chain(obj->get_page.sg)))
+                       obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
+       }
+       return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
  }
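The rewritten i915_gem_object_get_page() replaces an O(N) for_each_sg_page() walk with a cached cursor: get_page.sg and get_page.last remember where the previous lookup ended, so monotonically increasing page indices (the common case) advance in amortized O(1), and a backwards jump simply resets the cursor to the start. The same idea in a standalone sketch over an array of variable-length segments (hypothetical types, not the scatterlist API):

    #include <stdio.h>

    struct seg { int npages; };          /* stand-in for a scatterlist entry */

    struct cursor {
            int seg;                     /* segment the cache points at */
            int first;                   /* index of that segment's first page */
    };

    static int page_to_seg(const struct seg *segs, struct cursor *c, int n)
    {
            if (n < c->first) {          /* going backwards: restart the walk */
                    c->seg = 0;
                    c->first = 0;
            }
            while (c->first + segs[c->seg].npages <= n)
                    c->first += segs[c->seg++].npages;
            return c->seg;
    }

    int main(void)
    {
            const struct seg segs[] = { {4}, {2}, {8} };
            struct cursor c = { 0, 0 };
            int n;

            for (n = 0; n < 14; n++)
                    printf("page %2d -> segment %d\n", n, page_to_seg(segs, &c, n));
            return 0;
    }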
  static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
  {
        BUG_ON(obj->pages == NULL);
@@@ -2993,8 -3039,10 +3040,10 @@@ int i915_verify_lists(struct drm_devic
  int i915_debugfs_init(struct drm_minor *minor);
  void i915_debugfs_cleanup(struct drm_minor *minor);
  #ifdef CONFIG_DEBUG_FS
+ int i915_debugfs_connector_add(struct drm_connector *connector);
  void intel_display_crc_init(struct drm_device *dev);
  #else
+ static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; }
  static inline void intel_display_crc_init(struct drm_device *dev) {}
  #endif
  
@@@ -3021,13 -3069,6 +3070,6 @@@ void i915_destroy_error_state(struct dr
  void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
  const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
  
- /* i915_gem_batch_pool.c */
- void i915_gem_batch_pool_init(struct drm_device *dev,
-                             struct i915_gem_batch_pool *pool);
- void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
- struct drm_i915_gem_object*
- i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
  /* i915_cmd_parser.c */
  int i915_cmd_parser_get_version(void);
  int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
@@@ -3051,13 -3092,11 +3093,11 @@@ void i915_teardown_sysfs(struct drm_dev
  /* intel_i2c.c */
  extern int intel_setup_gmbus(struct drm_device *dev);
  extern void intel_teardown_gmbus(struct drm_device *dev);
- static inline bool intel_gmbus_is_port_valid(unsigned port)
- {
-       return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
- }
+ extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+                                    unsigned int pin);
  
- extern struct i2c_adapter *intel_gmbus_get_adapter(
-               struct drm_i915_private *dev_priv, unsigned port);
+ extern struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+                                                    unsigned int pin);
  extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
  extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
  static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@@ -3203,6 -3242,17 +3243,17 @@@ int intel_freq_opcode(struct drm_i915_p
  #define POSTING_READ(reg)     (void)I915_READ_NOTRACE(reg)
  #define POSTING_READ16(reg)   (void)I915_READ16_NOTRACE(reg)
  
+ /* These are untraced mmio-accessors that are only valid to be used inside
+  * critical sections inside IRQ handlers where forcewake is explicitly
+  * controlled.
+  * Think twice, and think again, before using these.
+  * Note: Should only be used between intel_uncore_forcewake_irqlock() and
+  * intel_uncore_forcewake_irqunlock().
+  */
+ #define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
+ #define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
+ #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
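The _FW accessors skip per-access forcewake bookkeeping and tracing, so the caller pays the forcewake cost once around a whole batch of register accesses instead of per register. A sketch of the intended calling shape, modeled standalone with an array in place of the mmio BAR; the intel_uncore_*__locked calls named in the comments are the real API from this patch, everything else here is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bar[256];            /* stand-in for dev_priv->regs */

    #define READ_FW(reg)       (bar[(reg) / 4])
    #define WRITE_FW(reg, val) (bar[(reg) / 4] = (val))

    int main(void)
    {
            /* intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); */
            WRITE_FW(0x10, 0xdeadbeef);  /* many raw accesses, one grab */
            WRITE_FW(0x14, 0xcafef00d);
            printf("0x10 = 0x%08x\n", READ_FW(0x10));
            /* intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); */
            return 0;
    }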
  /* "Broadcast RGB" property */
  #define INTEL_BROADCAST_RGB_AUTO 0
  #define INTEL_BROADCAST_RGB_FULL 1
@@@ -378,13 -378,13 +378,13 @@@ out
  void *i915_gem_object_alloc(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
+       return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
  }
  
  void i915_gem_object_free(struct drm_i915_gem_object *obj)
  {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       kmem_cache_free(dev_priv->slab, obj);
+       kmem_cache_free(dev_priv->objects, obj);
  }
  
  static int
@@@ -1181,12 -1181,27 +1181,27 @@@ static bool missed_irq(struct drm_i915_
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
  }
  
- static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+ static int __i915_spin_request(struct drm_i915_gem_request *rq)
  {
-       if (file_priv == NULL)
-               return true;
+       unsigned long timeout;
+       if (i915_gem_request_get_ring(rq)->irq_refcount)
+               return -EBUSY;
+       timeout = jiffies + 1;
+       while (!need_resched()) {
+               if (i915_gem_request_completed(rq, true))
+                       return 0;
+               if (time_after_eq(jiffies, timeout))
+                       break;
  
-       return !atomic_xchg(&file_priv->rps_wait_boost, true);
+               cpu_relax_lowlatency();
+       }
+       if (i915_gem_request_completed(rq, false))
+               return 0;
+       return -EAGAIN;
  }
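__i915_spin_request() bets that a nearly-complete request will finish within roughly one jiffy and polls before paying for IRQ setup and sleeping, with need_resched() bounding the damage when the CPU is wanted elsewhere. A standalone model of the bounded-spin-then-fallback shape, with a POSIX clock in place of jiffies (all names here are made up):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_bool done;

    static long elapsed_ns(const struct timespec *a, const struct timespec *b)
    {
            return (b->tv_sec - a->tv_sec) * 1000000000L +
                   (b->tv_nsec - a->tv_nsec);
    }

    /* Returns 0 if completion was observed while spinning, -1 if the
     * caller should fall back to the slow (sleeping) path. */
    static int spin_for_completion(long budget_ns)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            do {
                    if (atomic_load(&done))
                            return 0;
                    clock_gettime(CLOCK_MONOTONIC, &now);
            } while (elapsed_ns(&start, &now) < budget_ns);
            return -1;
    }

    int main(void)
    {
            atomic_store(&done, true);   /* pretend the request completed */
            printf("spin: %d\n", spin_for_completion(1000000L));
            return 0;
    }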
  
  /**
@@@ -1230,20 -1245,23 +1245,23 @@@ int __i915_wait_request(struct drm_i915
        timeout_expire = timeout ?
                jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
  
-       if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
-               gen6_rps_boost(dev_priv);
-               if (file_priv)
-                       mod_delayed_work(dev_priv->wq,
-                                        &file_priv->mm.idle_work,
-                                        msecs_to_jiffies(100));
-       }
-       if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
-               return -ENODEV;
+       if (INTEL_INFO(dev)->gen >= 6)
+               gen6_rps_boost(dev_priv, file_priv);
  
        /* Record current time in case interrupted by signal, or wedged */
        trace_i915_gem_request_wait_begin(req);
        before = ktime_get_raw_ns();
+       /* Optimistic spin for the next jiffie before touching IRQs */
+       ret = __i915_spin_request(req);
+       if (ret == 0)
+               goto out;
+       if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+               ret = -ENODEV;
+               goto out;
+       }
        for (;;) {
                struct timer_list timer;
  
                        destroy_timer_on_stack(&timer);
                }
        }
-       now = ktime_get_raw_ns();
-       trace_i915_gem_request_wait_end(req);
        if (!irq_test_in_progress)
                ring->irq_put(ring);
  
        finish_wait(&ring->irq_queue, &wait);
  
+ out:
+       now = ktime_get_raw_ns();
+       trace_i915_gem_request_wait_end(req);
        if (timeout) {
                s64 tres = *timeout - (now - before);
  
@@@ -2178,6 -2197,10 +2197,10 @@@ i915_gem_object_get_pages(struct drm_i9
                return ret;
  
        list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+       obj->get_page.sg = obj->pages->sgl;
+       obj->get_page.last = 0;
        return 0;
  }
  
@@@ -2377,11 -2400,10 +2400,11 @@@ int __i915_add_request(struct intel_eng
                ret = ring->add_request(ring);
                if (ret)
                        return ret;
 +
 +              request->tail = intel_ring_get_tail(ringbuf);
        }
  
        request->head = request_start;
 -      request->tail = intel_ring_get_tail(ringbuf);
  
        /* Whilst this request exists, batch_obj will be on the
         * active_list, and so will hold the active reference. Only when this
  
        i915_queue_hangcheck(ring->dev);
  
-       cancel_delayed_work_sync(&dev_priv->mm.idle_work);
        queue_delayed_work(dev_priv->wq,
                           &dev_priv->mm.retire_work,
                           round_jiffies_up_relative(HZ));
@@@ -2516,7 -2537,45 +2538,45 @@@ void i915_gem_request_free(struct kref 
                i915_gem_context_unreference(ctx);
        }
  
-       kfree(req);
+       kmem_cache_free(req->i915->requests, req);
+ }
+
+ int i915_gem_request_alloc(struct intel_engine_cs *ring,
+                          struct intel_context *ctx)
+ {
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct drm_i915_gem_request *rq;
+       int ret;
+       if (ring->outstanding_lazy_request)
+               return 0;
+       rq = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+       if (rq == NULL)
+               return -ENOMEM;
+       kref_init(&rq->ref);
+       rq->i915 = dev_priv;
+       ret = i915_gem_get_seqno(ring->dev, &rq->seqno);
+       if (ret) {
+               kfree(rq);
+               return ret;
+       }
+       rq->ring = ring;
+       if (i915.enable_execlists)
+               ret = intel_logical_ring_alloc_request_extras(rq, ctx);
+       else
+               ret = intel_ring_alloc_request_extras(rq);
+       if (ret) {
+               kfree(rq);
+               return ret;
+       }
+       ring->outstanding_lazy_request = rq;
+       return 0;
  }
  
  struct drm_i915_gem_request *
@@@ -2578,7 -2637,6 +2638,6 @@@ static void i915_gem_reset_ring_cleanup
                                struct drm_i915_gem_request,
                                execlist_link);
                list_del(&submit_req->execlist_link);
-               intel_runtime_pm_put(dev_priv);
  
                if (submit_req->ctx != ring->default_context)
                        intel_lr_context_unpin(ring, submit_req->ctx);
@@@ -2768,8 -2826,25 +2827,25 @@@ i915_gem_idle_work_handler(struct work_
  {
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), mm.idle_work.work);
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_engine_cs *ring;
+       int i;
  
-       intel_mark_idle(dev_priv->dev);
+       for_each_ring(ring, dev_priv, i)
+               if (!list_empty(&ring->request_list))
+                       return;
+       intel_mark_idle(dev);
+       if (mutex_trylock(&dev->struct_mutex)) {
+               struct intel_engine_cs *ring;
+               int i;
+               for_each_ring(ring, dev_priv, i)
+                       i915_gem_batch_pool_fini(&ring->batch_pool);
+               mutex_unlock(&dev->struct_mutex);
+       }
  }
  
  /**
@@@ -2867,9 -2942,7 +2943,7 @@@ i915_gem_wait_ioctl(struct drm_device *
        ret = __i915_wait_request(req, reset_counter, true,
                                  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                  file->driver_priv);
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_request_unreference(req);
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_request_unreference__unlocked(req);
        return ret;
  
  out:
@@@ -2994,7 -3067,7 +3068,7 @@@ int i915_vma_unbind(struct i915_vma *vm
  
        trace_i915_vma_unbind(vma);
  
-       vma->unbind_vma(vma);
+       vma->vm->unbind_vma(vma);
  
        list_del_init(&vma->mm_list);
        if (i915_is_ggtt(vma->vm)) {
@@@ -3515,20 -3588,8 +3589,8 @@@ search_free
        if (ret)
                goto err_remove_node;
  
-       /*  allocate before insert / bind */
-       if (vma->vm->allocate_va_range) {
-               trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
-                               VM_TO_TRACE_NAME(vma->vm));
-               ret = vma->vm->allocate_va_range(vma->vm,
-                                               vma->node.start,
-                                               vma->node.size);
-               if (ret)
-                       goto err_remove_node;
-       }
        trace_i915_vma_bind(vma, flags);
-       ret = i915_vma_bind(vma, obj->cache_level,
-                           flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+       ret = i915_vma_bind(vma, obj->cache_level, flags);
        if (ret)
                goto err_finish_gtt;
  
@@@ -3754,7 -3815,7 +3816,7 @@@ int i915_gem_object_set_cache_level(str
                list_for_each_entry(vma, &obj->vma_list, vma_link)
                        if (drm_mm_node_allocated(&vma->node)) {
                                ret = i915_vma_bind(vma, cache_level,
-                                                   vma->bound & GLOBAL_BIND);
+                                                   PIN_UPDATE);
                                if (ret)
                                        return ret;
                        }
@@@ -3852,24 -3913,6 +3914,6 @@@ unlock
        return ret;
  }
  
- static bool is_pin_display(struct drm_i915_gem_object *obj)
- {
-       struct i915_vma *vma;
-       vma = i915_gem_obj_to_ggtt(obj);
-       if (!vma)
-               return false;
-       /* There are 2 sources that pin objects:
-        *   1. The display engine (scanouts, sprites, cursors);
-        *   2. Reservations for execbuffer;
-        *
-        * We can ignore reservations as we hold the struct_mutex and
-        * are only called outside of the reservation path.
-        */
-       return vma->pin_count;
- }
  /*
   * Prepare buffer for display plane (scanout, cursors, etc).
   * Can be called from an uninterruptible phase (modesetting) and allows
@@@ -3882,7 -3925,6 +3926,6 @@@ i915_gem_object_pin_to_display_plane(st
                                     const struct i915_ggtt_view *view)
  {
        u32 old_read_domains, old_write_domain;
-       bool was_pin_display;
        int ret;
  
        if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
        /* Mark the pin_display early so that we account for the
         * display coherency whilst setting up the cache domains.
         */
-       was_pin_display = obj->pin_display;
-       obj->pin_display = true;
+       obj->pin_display++;
  
        /* The display engine is not coherent with the LLC cache on gen6.  As
         * a result, we make sure that the pinning that is about to occur is
        return 0;
  
  err_unpin_display:
-       WARN_ON(was_pin_display != is_pin_display(obj));
-       obj->pin_display = was_pin_display;
+       obj->pin_display--;
        return ret;
  }
  
  i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
                                         const struct i915_ggtt_view *view)
  {
+       if (WARN_ON(obj->pin_display == 0))
+               return;
        i915_gem_object_ggtt_unpin_view(obj, view);
  
-       obj->pin_display = is_pin_display(obj);
+       obj->pin_display--;
  }
  
  int
@@@ -4072,9 -4115,7 +4116,7 @@@ i915_gem_ring_throttle(struct drm_devic
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_request_unreference(target);
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_request_unreference__unlocked(target);
  
        return ret;
  }
@@@ -4155,18 -4196,12 +4197,12 @@@ i915_gem_object_do_pin(struct drm_i915_
  
        bound = vma ? vma->bound : 0;
        if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-               /* In true PPGTT, bind has possibly changed PDEs, which
-                * means we must do a context switch before the GPU can
-                * accurately read some of the VMAs.
-                */
                vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
                                                 flags);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
-       }
-       if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
-               ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+       } else {
+               ret = i915_vma_bind(vma, obj->cache_level, flags);
                if (ret)
                        return ret;
        }
        WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
  
        vma->pin_count++;
-       if (flags & PIN_MAPPABLE)
-               obj->pin_mappable |= true;
        return 0;
  }
  
@@@ -4235,8 -4267,7 +4268,7 @@@ i915_gem_object_ggtt_unpin_view(struct 
        WARN_ON(vma->pin_count == 0);
        WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
  
-       if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
-               obj->pin_mappable = false;
+       --vma->pin_count;
  }
  
  bool
@@@ -4375,7 -4406,7 +4407,7 @@@ void i915_gem_object_init(struct drm_i9
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
-       INIT_LIST_HEAD(&obj->batch_pool_list);
+       INIT_LIST_HEAD(&obj->batch_pool_link);
  
        obj->ops = ops;
  
@@@ -4577,7 -4608,7 +4609,7 @@@ void i915_gem_vma_destroy(struct i915_v
  
        list_del(&vma->vma_link);
  
-       kfree(vma);
+       kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
  }
  
  static void
@@@ -4864,12 -4895,12 +4896,12 @@@ int i915_gem_init(struct drm_device *de
        }
  
        if (!i915.enable_execlists) {
-               dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+               dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
                dev_priv->gt.init_rings = i915_gem_init_rings;
                dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
                dev_priv->gt.stop_ring = intel_stop_ring_buffer;
        } else {
-               dev_priv->gt.do_execbuf = intel_execlists_submission;
+               dev_priv->gt.execbuf_submit = intel_execlists_submission;
                dev_priv->gt.init_rings = intel_logical_rings_init;
                dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
                dev_priv->gt.stop_ring = intel_logical_ring_stop;
@@@ -4951,11 -4982,21 +4983,21 @@@ i915_gem_load(struct drm_device *dev
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
  
-       dev_priv->slab =
+       dev_priv->objects =
                kmem_cache_create("i915_gem_object",
                                  sizeof(struct drm_i915_gem_object), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
+       dev_priv->vmas =
+               kmem_cache_create("i915_gem_vma",
+                                 sizeof(struct i915_vma), 0,
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
+       dev_priv->requests =
+               kmem_cache_create("i915_gem_request",
+                                 sizeof(struct drm_i915_gem_request), 0,
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
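Three dedicated slab caches replace the single dev_priv->slab: objects, vmas and requests are allocated constantly, and per-type caches give cheap recycling plus properly aligned objects. A toy free-list cache showing the recycling behavior the driver gets from kmem_cache; this models the idea only, not the slab allocator itself:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cache {
            size_t size;
            void *free_list;     /* first word of a free object -> next */
    };

    static void *cache_zalloc(struct cache *c)
    {
            void *obj = c->free_list;

            if (obj)
                    c->free_list = *(void **)obj;
            else
                    obj = malloc(c->size);
            if (obj)
                    memset(obj, 0, c->size);
            return obj;
    }

    static void cache_free(struct cache *c, void *obj)
    {
            *(void **)obj = c->free_list;
            c->free_list = obj;
    }

    int main(void)
    {
            struct cache requests = { 64, NULL };
            void *a = cache_zalloc(&requests);

            cache_free(&requests, a);
            printf("recycled: %s\n", cache_zalloc(&requests) == a ? "yes" : "no");
            return 0;
    }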
  
        INIT_LIST_HEAD(&dev_priv->vm_list);
        i915_init_vm(dev_priv, &dev_priv->gtt.base);
  
        i915_gem_shrinker_init(dev_priv);
  
-       i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
        mutex_init(&dev_priv->fb_tracking.lock);
  }
  
@@@ -5007,8 -5046,6 +5047,6 @@@ void i915_gem_release(struct drm_devic
  {
        struct drm_i915_file_private *file_priv = file->driver_priv;
  
-       cancel_delayed_work_sync(&file_priv->mm.idle_work);
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
                request->file_priv = NULL;
        }
        spin_unlock(&file_priv->mm.lock);
- }
- static void
- i915_gem_file_idle_work_handler(struct work_struct *work)
- {
-       struct drm_i915_file_private *file_priv =
-               container_of(work, typeof(*file_priv), mm.idle_work.work);
  
-       atomic_set(&file_priv->rps_wait_boost, false);
+       if (!list_empty(&file_priv->rps_boost)) {
+               mutex_lock(&to_i915(dev)->rps.hw_lock);
+               list_del(&file_priv->rps_boost);
+               mutex_unlock(&to_i915(dev)->rps.hw_lock);
+       }
  }
  
  int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        file->driver_priv = file_priv;
        file_priv->dev_priv = dev->dev_private;
        file_priv->file = file;
+       INIT_LIST_HEAD(&file_priv->rps_boost);
  
        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
-       INIT_DELAYED_WORK(&file_priv->mm.idle_work,
-                         i915_gem_file_idle_work_handler);
  
        ret = i915_gem_context_open(dev, file);
        if (ret)
@@@ -5123,7 -5156,7 +5157,7 @@@ i915_gem_obj_ggtt_offset_view(struct dr
                    i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
  
-       WARN(1, "global vma for this object not found.\n");
+       WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
        return -1;
  }
  
@@@ -88,6 -88,12 +88,12 @@@ static const u32 hpd_status_i915[HPD_NU
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
  };
  
+ /* BXT hpd list */
+ static const u32 hpd_bxt[HPD_NUM_PINS] = {
+       [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
+       [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
+ };
+
  /* IIR can theoretically queue up two events. Be paranoid. */
  #define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
@@@ -985,8 -991,7 +991,7 @@@ static void ironlake_rps_change_irq_han
        return;
  }
  
- static void notify_ring(struct drm_device *dev,
-                       struct intel_engine_cs *ring)
+ static void notify_ring(struct intel_engine_cs *ring)
  {
        if (!intel_ring_initialized(ring))
                return;
@@@ -1049,7 -1054,7 +1054,7 @@@ static u32 vlv_wa_c0_ei(struct drm_i915
        if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
                if (!vlv_c0_above(dev_priv,
                                  &dev_priv->rps.down_ei, &now,
-                                 VLV_RP_DOWN_EI_THRESHOLD))
+                                 dev_priv->rps.down_threshold))
                        events |= GEN6_PM_RP_DOWN_THRESHOLD;
                dev_priv->rps.down_ei = now;
        }
        if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
                if (vlv_c0_above(dev_priv,
                                 &dev_priv->rps.up_ei, &now,
-                                VLV_RP_UP_EI_THRESHOLD))
+                                dev_priv->rps.up_threshold))
                        events |= GEN6_PM_RP_UP_THRESHOLD;
                dev_priv->rps.up_ei = now;
        }
@@@ -1095,21 -1100,20 +1100,20 @@@ static void gen6_pm_rps_work(struct wor
        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
  
        adj = dev_priv->rps.last_adj;
+       new_delay = dev_priv->rps.cur_freq;
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
-               else {
-                       /* CHV needs even encode values */
-                       adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
-               }
-               new_delay = dev_priv->rps.cur_freq + adj;
+               else /* CHV needs even encode values */
+                       adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
-               if (new_delay < dev_priv->rps.efficient_freq)
+               if (new_delay < dev_priv->rps.efficient_freq - adj) {
                        new_delay = dev_priv->rps.efficient_freq;
+                       adj = 0;
+               }
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
-               else {
-                       /* CHV needs even encode values */
-                       adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
-               }
-               new_delay = dev_priv->rps.cur_freq + adj;
+               else /* CHV needs even encode values */
+                       adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
        } else { /* unknown event */
-               new_delay = dev_priv->rps.cur_freq;
+               adj = 0;
        }
  
+       dev_priv->rps.last_adj = adj;
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
+       new_delay += adj;
        new_delay = clamp_t(int, new_delay,
                            dev_priv->rps.min_freq_softlimit,
                            dev_priv->rps.max_freq_softlimit);
  
-       dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
        intel_set_rps(dev_priv->dev, new_delay);
  
        mutex_unlock(&dev_priv->rps.hw_lock);
@@@ -1251,9 -1253,9 +1253,9 @@@ static void ilk_gt_irq_handler(struct d
  {
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-               notify_ring(dev, &dev_priv->ring[RCS]);
+               notify_ring(&dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
-               notify_ring(dev, &dev_priv->ring[VCS]);
+               notify_ring(&dev_priv->ring[VCS]);
  }
  
  static void snb_gt_irq_handler(struct drm_device *dev,
  
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-               notify_ring(dev, &dev_priv->ring[RCS]);
+               notify_ring(&dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
-               notify_ring(dev, &dev_priv->ring[VCS]);
+               notify_ring(&dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
-               notify_ring(dev, &dev_priv->ring[BCS]);
+               notify_ring(&dev_priv->ring[BCS]);
  
        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                ivybridge_parity_error_irq_handler(dev, gt_iir);
  }
  
- static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
-                                      struct drm_i915_private *dev_priv,
+ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
                                       u32 master_ctl)
  {
-       struct intel_engine_cs *ring;
-       u32 rcs, bcs, vcs;
-       uint32_t tmp = 0;
        irqreturn_t ret = IRQ_NONE;
  
        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-               tmp = I915_READ(GEN8_GT_IIR(0));
+               u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
                if (tmp) {
-                       I915_WRITE(GEN8_GT_IIR(0), tmp);
+                       I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
                        ret = IRQ_HANDLED;
  
-                       rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
-                       ring = &dev_priv->ring[RCS];
-                       if (rcs & GT_RENDER_USER_INTERRUPT)
-                               notify_ring(dev, ring);
-                       if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_lrc_irq_handler(ring);
-                       bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
-                       ring = &dev_priv->ring[BCS];
-                       if (bcs & GT_RENDER_USER_INTERRUPT)
-                               notify_ring(dev, ring);
-                       if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_lrc_irq_handler(ring);
+                       if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
+                               intel_lrc_irq_handler(&dev_priv->ring[RCS]);
+                       if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
+                               notify_ring(&dev_priv->ring[RCS]);
+                       if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
+                               intel_lrc_irq_handler(&dev_priv->ring[BCS]);
+                       if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
+                               notify_ring(&dev_priv->ring[BCS]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
        }
  
        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
-               tmp = I915_READ(GEN8_GT_IIR(1));
+               u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
                if (tmp) {
-                       I915_WRITE(GEN8_GT_IIR(1), tmp);
+                       I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
                        ret = IRQ_HANDLED;
  
-                       vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
-                       ring = &dev_priv->ring[VCS];
-                       if (vcs & GT_RENDER_USER_INTERRUPT)
-                               notify_ring(dev, ring);
-                       if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_lrc_irq_handler(ring);
-                       vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
-                       ring = &dev_priv->ring[VCS2];
-                       if (vcs & GT_RENDER_USER_INTERRUPT)
-                               notify_ring(dev, ring);
-                       if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_lrc_irq_handler(ring);
-               } else
-                       DRM_ERROR("The master control interrupt lied (GT1)!\n");
-       }
+                       if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
+                               intel_lrc_irq_handler(&dev_priv->ring[VCS]);
+                       if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
+                               notify_ring(&dev_priv->ring[VCS]);
  
-       if (master_ctl & GEN8_GT_PM_IRQ) {
-               tmp = I915_READ(GEN8_GT_IIR(2));
-               if (tmp & dev_priv->pm_rps_events) {
-                       I915_WRITE(GEN8_GT_IIR(2),
-                                  tmp & dev_priv->pm_rps_events);
-                       ret = IRQ_HANDLED;
-                       gen6_rps_irq_handler(dev_priv, tmp);
+                       if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
+                               intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
+                       if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
+                               notify_ring(&dev_priv->ring[VCS2]);
                } else
-                       DRM_ERROR("The master control interrupt lied (PM)!\n");
+                       DRM_ERROR("The master control interrupt lied (GT1)!\n");
        }
  
        if (master_ctl & GEN8_GT_VECS_IRQ) {
-               tmp = I915_READ(GEN8_GT_IIR(3));
+               u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
                if (tmp) {
-                       I915_WRITE(GEN8_GT_IIR(3), tmp);
+                       I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
                        ret = IRQ_HANDLED;
  
-                       vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
-                       ring = &dev_priv->ring[VECS];
-                       if (vcs & GT_RENDER_USER_INTERRUPT)
-                               notify_ring(dev, ring);
-                       if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-                               intel_lrc_irq_handler(ring);
+                       if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
+                               intel_lrc_irq_handler(&dev_priv->ring[VECS]);
+                       if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
+                               notify_ring(&dev_priv->ring[VECS]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT3)!\n");
        }
  
+       if (master_ctl & GEN8_GT_PM_IRQ) {
+               u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
+               if (tmp & dev_priv->pm_rps_events) {
+                       I915_WRITE_FW(GEN8_GT_IIR(2),
+                                     tmp & dev_priv->pm_rps_events);
+                       ret = IRQ_HANDLED;
+                       gen6_rps_irq_handler(dev_priv, tmp);
+               } else
+                       DRM_ERROR("The master control interrupt lied (PM)!\n");
+       }
        return ret;
  }
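The rework tests (EVENT << ENGINE_SHIFT) masks directly against the raw IIR value instead of shifting out one byte per engine into temporaries, so each branch stands alone. The decode idiom, standalone with made-up bit assignments (the real GEN8_*_IRQ_SHIFT values differ):

    #include <stdint.h>
    #include <stdio.h>

    #define USER_INTERRUPT        (1u << 0)   /* hypothetical event bits */
    #define CTX_SWITCH_INTERRUPT  (1u << 3)
    #define RCS_SHIFT             0           /* hypothetical engine fields */
    #define BCS_SHIFT             16

    int main(void)
    {
            uint32_t iir = (USER_INTERRUPT << RCS_SHIFT) |
                           (CTX_SWITCH_INTERRUPT << BCS_SHIFT);

            if (iir & (USER_INTERRUPT << RCS_SHIFT))
                    printf("render: user interrupt\n");
            if (iir & (CTX_SWITCH_INTERRUPT << RCS_SHIFT))
                    printf("render: context switch\n");
            if (iir & (CTX_SWITCH_INTERRUPT << BCS_SHIFT))
                    printf("blitter: context switch\n");
            return 0;
    }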
  
@@@ -1440,7 -1428,7 +1428,7 @@@ static inline void intel_hpd_irq_handle
                if (port && dev_priv->hpd_irq_port[port]) {
                        bool long_hpd;
  
-                       if (HAS_PCH_SPLIT(dev)) {
+                       if (!HAS_GMCH_DISPLAY(dev_priv)) {
                                dig_shift = pch_port_to_hotplug_shift(port);
                                long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
                        } else {
@@@ -1654,7 -1642,7 +1642,7 @@@ static void gen6_rps_irq_handler(struc
  
        if (HAS_VEBOX(dev_priv->dev)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-                       notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+                       notify_ring(&dev_priv->ring[VECS]);
  
                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
                        DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@@ -1848,7 -1836,7 +1836,7 @@@ static irqreturn_t cherryview_irq_handl
                        I915_WRITE(VLV_IIR, iir);
                }
  
-               gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+               gen8_gt_irq_handler(dev_priv, master_ctl);
  
                /* Call regardless, as some status bits might not be
                 * signalled in iir */
@@@ -2164,6 -2152,38 +2152,38 @@@ static irqreturn_t ironlake_irq_handler
        return ret;
  }
  
+ static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t hp_control;
+       uint32_t hp_trigger;
+       /* Get the status */
+       hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
+       hp_control = I915_READ(BXT_HOTPLUG_CTL);
+       /* Hotplug not enabled? */
+       if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
+               DRM_ERROR("Interrupt when HPD disabled\n");
+               return;
+       }
+       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+               hp_control & BXT_HOTPLUG_CTL_MASK);
+       /* Check for HPD storm and schedule bottom half */
+       intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
+       /*
+        * FIXME: Save the hot plug status for bottom half before
+        * clearing the sticky status bits, else the status will be
+        * lost.
+        */
+       /* Clear sticky bits in hpd status */
+       I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
+ }
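
The FIXME above notes that the write clearing the sticky status bits races with the bottom half. One possible shape of the fix, latching the status first (the save location is an assumption, not part of this patch):

/* Hypothetical fix sketch: stash the status for the bottom half
 * before the write below clears the sticky bits. */
spin_lock(&dev_priv->irq_lock);
dev_priv->hpd_event_bits |= hp_control & BXT_HPD_STATUS_MASK; /* assumed field */
spin_unlock(&dev_priv->irq_lock);
I915_WRITE(BXT_HOTPLUG_CTL, hp_control); /* now safe to clear */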
  static irqreturn_t gen8_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = arg;
                aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
                        GEN9_AUX_CHANNEL_D;
  
-       master_ctl = I915_READ(GEN8_MASTER_IRQ);
+       master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
        master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
        if (!master_ctl)
                return IRQ_NONE;
  
-       I915_WRITE(GEN8_MASTER_IRQ, 0);
-       POSTING_READ(GEN8_MASTER_IRQ);
+       I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
  
        /* Find, clear, then process each source of interrupt */
  
-       ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+       ret = gen8_gt_irq_handler(dev_priv, master_ctl);
  
        if (master_ctl & GEN8_DE_MISC_IRQ) {
                tmp = I915_READ(GEN8_DE_MISC_IIR);
        if (master_ctl & GEN8_DE_PORT_IRQ) {
                tmp = I915_READ(GEN8_DE_PORT_IIR);
                if (tmp) {
+                       bool found = false;
                        I915_WRITE(GEN8_DE_PORT_IIR, tmp);
                        ret = IRQ_HANDLED;
  
-                       if (tmp & aux_mask)
+                       if (tmp & aux_mask) {
                                dp_aux_irq_handler(dev);
-                       else
+                               found = true;
+                       }
+                       if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
+                               bxt_hpd_handler(dev, tmp);
+                               found = true;
+                       }
+                       if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
+                               gmbus_irq_handler(dev);
+                               found = true;
+                       }
+                       if (!found)
                                DRM_ERROR("Unexpected DE Port interrupt\n");
                }
                else
                        DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
        }
  
-       if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
+       if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
+           master_ctl & GEN8_DE_PCH_IRQ) {
                /*
                 * FIXME(BDW): Assume for now that the new interrupt handling
                 * scheme also closed the SDE interrupt handling race we've seen
  
        }
  
-       I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
-       POSTING_READ(GEN8_MASTER_IRQ);
+       I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+       POSTING_READ_FW(GEN8_MASTER_IRQ);
  
        return ret;
  }
@@@ -3104,7 -3139,8 +3139,8 @@@ static void gen8_irq_reset(struct drm_d
        GEN5_IRQ_RESET(GEN8_DE_MISC_);
        GEN5_IRQ_RESET(GEN8_PCU_);
  
-       ibx_irq_reset(dev);
+       if (HAS_PCH_SPLIT(dev))
+               ibx_irq_reset(dev);
  }
  
  void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@@ -3178,6 -3214,42 +3214,42 @@@ static void ibx_hpd_irq_setup(struct dr
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  }
  
+ static void bxt_hpd_irq_setup(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_encoder *intel_encoder;
+       u32 hotplug_port = 0;
+       u32 hotplug_ctrl;
+       /* Now, enable HPD */
+       for_each_intel_encoder(dev, intel_encoder) {
+               if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
+                               == HPD_ENABLED)
+                       hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
+       }
+       /* Mask all HPD control bits */
+       hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
+       /* Enable requested port in hotplug control */
+       /* TODO: implement (short) HPD support on port A */
+       WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
+       if (hotplug_port & BXT_DE_PORT_HP_DDIB)
+               hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
+       if (hotplug_port & BXT_DE_PORT_HP_DDIC)
+               hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
+       I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
+       /* Unmask DDI hotplug in IMR */
+       hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
+       I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
+       /* Enable DDI hotplug in IER */
+       hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
+       I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
+       POSTING_READ(GEN8_DE_PORT_IER);
+ }
+
  static void ibx_irq_postinstall(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -3448,13 -3520,16 +3520,16 @@@ static void gen8_de_irq_postinstall(str
        uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
        uint32_t de_pipe_enables;
        int pipe;
-       u32 aux_en = GEN8_AUX_CHANNEL_A;
+       u32 de_port_en = GEN8_AUX_CHANNEL_A;
  
        if (IS_GEN9(dev_priv)) {
                de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
                                  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
-               aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+               de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
                        GEN9_AUX_CHANNEL_D;
+               if (IS_BROXTON(dev_priv))
+                       de_port_en |= BXT_DE_PORT_GMBUS;
        } else
                de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
                                  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
                                          dev_priv->de_irq_mask[pipe],
                                          de_pipe_enables);
  
-       GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
+       GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
  }
  
  static int gen8_irq_postinstall(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       ibx_irq_pre_postinstall(dev);
+       if (HAS_PCH_SPLIT(dev))
+               ibx_irq_pre_postinstall(dev);
  
        gen8_gt_irq_postinstall(dev_priv);
        gen8_de_irq_postinstall(dev_priv);
  
-       ibx_irq_postinstall(dev);
+       if (HAS_PCH_SPLIT(dev))
+               ibx_irq_postinstall(dev);
  
        I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
        POSTING_READ(GEN8_MASTER_IRQ);
@@@ -3598,12 -3675,14 +3675,12 @@@ static int i8xx_irq_postinstall(struct 
                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
 -                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
 -                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 +                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
        I915_WRITE16(IMR, dev_priv->irq_mask);
  
        I915_WRITE16(IER,
                     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
 -                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
                     I915_USER_INTERRUPT);
        POSTING_READ16(IER);
  
@@@ -3694,7 -3773,7 +3771,7 @@@ static irqreturn_t i8xx_irq_handler(in
                new_iir = I915_READ16(IIR); /* Flush posted writes */
  
                if (iir & I915_USER_INTERRUPT)
-                       notify_ring(dev, &dev_priv->ring[RCS]);
+                       notify_ring(&dev_priv->ring[RCS]);
  
                for_each_pipe(dev_priv, pipe) {
                        int plane = pipe;
@@@ -3765,12 -3844,14 +3842,12 @@@ static int i915_irq_postinstall(struct 
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
 -                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
 -                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 +                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
  
        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
 -              I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;
  
        if (I915_HAS_HOTPLUG(dev)) {
@@@ -3883,7 -3964,7 +3960,7 @@@ static irqreturn_t i915_irq_handler(in
                new_iir = I915_READ(IIR); /* Flush posted writes */
  
                if (iir & I915_USER_INTERRUPT)
-                       notify_ring(dev, &dev_priv->ring[RCS]);
+                       notify_ring(&dev_priv->ring[RCS]);
  
                for_each_pipe(dev_priv, pipe) {
                        int plane = pipe;
@@@ -4110,9 -4191,9 +4187,9 @@@ static irqreturn_t i965_irq_handler(in
                new_iir = I915_READ(IIR); /* Flush posted writes */
  
                if (iir & I915_USER_INTERRUPT)
-                       notify_ring(dev, &dev_priv->ring[RCS]);
+                       notify_ring(&dev_priv->ring[RCS]);
                if (iir & I915_BSD_USER_INTERRUPT)
-                       notify_ring(dev, &dev_priv->ring[VCS]);
+                       notify_ring(&dev_priv->ring[VCS]);
  
                for_each_pipe(dev_priv, pipe) {
                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@@ -4294,7 -4375,10 +4371,10 @@@ void intel_irq_init(struct drm_i915_pri
                dev->driver->irq_uninstall = gen8_irq_uninstall;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
-               dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+               if (HAS_PCH_SPLIT(dev))
+                       dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+               else
+                       dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_reset;
@@@ -671,8 -671,6 +671,6 @@@ enum skl_disp_power_wells 
  #define   FB_FMAX_VMIN_FREQ_LO_MASK           0xf8000000
  
  #define VLV_CZ_CLOCK_TO_MILLI_SEC             100000
- #define VLV_RP_UP_EI_THRESHOLD                        90
- #define VLV_RP_DOWN_EI_THRESHOLD              70
  
  /* vlv2 north clock */
  #define CCK_FUSE_REG                          0x8
  /**
   * DOC: DPIO
   *
-  * VLV and CHV have slightly peculiar display PHYs for driving DP/HDMI
+  * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
   * ports. DPIO is the name given to such a display PHY. These PHYs
   * don't follow the standard programming model using direct MMIO
   * registers, and instead their registers must be accessed through IOSF
   * controlled from the display controller side. No DPIO registers
   * need to be accessed during AUX communication,
   *
-  * Generally the common lane corresponds to the pipe and
+  * Generally on VLV/CHV the common lane corresponds to the pipe and
   * the spline (PCS/TX) corresponds to the port.
   *
   * For dual channel PHY (VLV/CHV):
   *
   *  port D == PCS/TX CH0
   *
-  * Note: digital port B is DDI0, digital port C is DDI1,
-  * digital port D is DDI2
+  * On BXT the entire PHY channel corresponds to the port. That means
+  * the PLL is also now associated with the port rather than the pipe,
+  * and so the clock needs to be routed to the appropriate transcoder.
+  * Port A PLL is directly connected to transcoder EDP and port B/C
+  * PLLs can be routed to any transcoder A/B/C.
+  *
+  * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+  * digital port D (CHV) or port A (BXT).
   */
  /*
-  * Dual channel PHY (VLV/CHV)
+  * Dual channel PHY (VLV/CHV/BXT)
   * ---------------------------------
   * |      CH0      |      CH1      |
   * |  CMN/PLL/REF  |  CMN/PLL/REF  |
   * |     DDI0      |     DDI1      | DP/HDMI ports
   * ---------------------------------
   *
-  * Single channel PHY (CHV)
+  * Single channel PHY (CHV/BXT)
   * -----------------
   * |      CH0      |
   * |  CMN/PLL/REF  |
  #define   DPIO_FRC_LATENCY_SHFIT      8
  #define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
  #define   DPIO_UPAR_SHIFT             30
+ /* BXT PHY registers */
+ #define _BXT_PHY(phy, a, b)           _PIPE((phy), (a), (b))
+ #define BXT_P_CR_GT_DISP_PWRON                0x138090
+ #define   GT_DISPLAY_POWER_ON(phy)    (1 << (phy))
+ #define _PHY_CTL_FAMILY_EDP           0x64C80
+ #define _PHY_CTL_FAMILY_DDI           0x64C90
+ #define   COMMON_RESET_DIS            (1 << 31)
+ #define BXT_PHY_CTL_FAMILY(phy)               _BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
+                                                       _PHY_CTL_FAMILY_EDP)
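
_BXT_PHY() reuses the two-entry _PIPE() indexing (_PIPE(i, a, b) expands to (a) + (i) * ((b) - (a))), so phy 0 selects the first argument and phy 1 the second; note the DDI family deliberately comes first:

/* Worked example for the family control registers above:
 *   BXT_PHY_CTL_FAMILY(0) == _PHY_CTL_FAMILY_DDI == 0x64C90 (PHY0: ports B/C)
 *   BXT_PHY_CTL_FAMILY(1) == _PHY_CTL_FAMILY_EDP == 0x64C80 (PHY1: port A)
 */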
+ /* BXT PHY PLL registers */
+ #define _PORT_PLL_A                   0x46074
+ #define _PORT_PLL_B                   0x46078
+ #define _PORT_PLL_C                   0x4607c
+ #define   PORT_PLL_ENABLE             (1 << 31)
+ #define   PORT_PLL_LOCK                       (1 << 30)
+ #define   PORT_PLL_REF_SEL            (1 << 27)
+ #define BXT_PORT_PLL_ENABLE(port)     _PORT(port, _PORT_PLL_A, _PORT_PLL_B)
+ #define _PORT_PLL_EBB_0_A             0x162034
+ #define _PORT_PLL_EBB_0_B             0x6C034
+ #define _PORT_PLL_EBB_0_C             0x6C340
+ #define   PORT_PLL_P1_MASK            (0x07 << 13)
+ #define   PORT_PLL_P1(x)              ((x)  << 13)
+ #define   PORT_PLL_P2_MASK            (0x1f << 8)
+ #define   PORT_PLL_P2(x)              ((x)  << 8)
+ #define BXT_PORT_PLL_EBB_0(port)      _PORT3(port, _PORT_PLL_EBB_0_A, \
+                                               _PORT_PLL_EBB_0_B,      \
+                                               _PORT_PLL_EBB_0_C)
+ #define _PORT_PLL_EBB_4_A             0x162038
+ #define _PORT_PLL_EBB_4_B             0x6C038
+ #define _PORT_PLL_EBB_4_C             0x6C344
+ #define   PORT_PLL_10BIT_CLK_ENABLE   (1 << 13)
+ #define   PORT_PLL_RECALIBRATE                (1 << 14)
+ #define BXT_PORT_PLL_EBB_4(port)      _PORT3(port, _PORT_PLL_EBB_4_A, \
+                                               _PORT_PLL_EBB_4_B,      \
+                                               _PORT_PLL_EBB_4_C)
+ #define _PORT_PLL_0_A                 0x162100
+ #define _PORT_PLL_0_B                 0x6C100
+ #define _PORT_PLL_0_C                 0x6C380
+ /* PORT_PLL_0_A */
+ #define   PORT_PLL_M2_MASK            0xFF
+ /* PORT_PLL_1_A */
+ #define   PORT_PLL_N_MASK             (0x0F << 8)
+ #define   PORT_PLL_N(x)                       ((x) << 8)
+ /* PORT_PLL_2_A */
+ #define   PORT_PLL_M2_FRAC_MASK               0x3FFFFF
+ /* PORT_PLL_3_A */
+ #define   PORT_PLL_M2_FRAC_ENABLE     (1 << 16)
+ /* PORT_PLL_6_A */
+ #define   PORT_PLL_PROP_COEFF_MASK    0xF
+ #define   PORT_PLL_INT_COEFF_MASK     (0x1F << 8)
+ #define   PORT_PLL_INT_COEFF(x)               ((x)  << 8)
+ #define   PORT_PLL_GAIN_CTL_MASK      (0x07 << 16)
+ #define   PORT_PLL_GAIN_CTL(x)                ((x)  << 16)
+ /* PORT_PLL_8_A */
+ #define   PORT_PLL_TARGET_CNT_MASK    0x3FF
+ #define _PORT_PLL_BASE(port)          _PORT3(port, _PORT_PLL_0_A,     \
+                                               _PORT_PLL_0_B,          \
+                                               _PORT_PLL_0_C)
+ #define BXT_PORT_PLL(port, idx)               (_PORT_PLL_BASE(port) + (idx) * 4)
+ /* BXT PHY common lane registers */
+ #define _PORT_CL1CM_DW0_A             0x162000
+ #define _PORT_CL1CM_DW0_BC            0x6C000
+ #define   PHY_POWER_GOOD              (1 << 16)
+ #define BXT_PORT_CL1CM_DW0(phy)               _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
+                                                       _PORT_CL1CM_DW0_A)
+ #define _PORT_CL1CM_DW9_A             0x162024
+ #define _PORT_CL1CM_DW9_BC            0x6C024
+ #define   IREF0RC_OFFSET_SHIFT                8
+ #define   IREF0RC_OFFSET_MASK         (0xFF << IREF0RC_OFFSET_SHIFT)
+ #define BXT_PORT_CL1CM_DW9(phy)               _BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
+                                                       _PORT_CL1CM_DW9_A)
+ #define _PORT_CL1CM_DW10_A            0x162028
+ #define _PORT_CL1CM_DW10_BC           0x6C028
+ #define   IREF1RC_OFFSET_SHIFT                8
+ #define   IREF1RC_OFFSET_MASK         (0xFF << IREF1RC_OFFSET_SHIFT)
+ #define BXT_PORT_CL1CM_DW10(phy)      _BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
+                                                       _PORT_CL1CM_DW10_A)
+ #define _PORT_CL1CM_DW28_A            0x162070
+ #define _PORT_CL1CM_DW28_BC           0x6C070
+ #define   OCL1_POWER_DOWN_EN          (1 << 23)
+ #define   DW28_OLDO_DYN_PWR_DOWN_EN   (1 << 22)
+ #define   SUS_CLK_CONFIG              0x3
+ #define BXT_PORT_CL1CM_DW28(phy)      _BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
+                                                       _PORT_CL1CM_DW28_A)
+ #define _PORT_CL1CM_DW30_A            0x162078
+ #define _PORT_CL1CM_DW30_BC           0x6C078
+ #define   OCL2_LDOFUSE_PWR_DIS                (1 << 6)
+ #define BXT_PORT_CL1CM_DW30(phy)      _BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
+                                                       _PORT_CL1CM_DW30_A)
+ /* Defined for PHY0 only */
+ #define BXT_PORT_CL2CM_DW6_BC         0x6C358
+ #define   DW6_OLDO_DYN_PWR_DOWN_EN    (1 << 28)
+ /* BXT PHY Ref registers */
+ #define _PORT_REF_DW3_A                       0x16218C
+ #define _PORT_REF_DW3_BC              0x6C18C
+ #define   GRC_DONE                    (1 << 22)
+ #define BXT_PORT_REF_DW3(phy)         _BXT_PHY((phy), _PORT_REF_DW3_BC, \
+                                                       _PORT_REF_DW3_A)
+ #define _PORT_REF_DW6_A                       0x162198
+ #define _PORT_REF_DW6_BC              0x6C198
+ /*
+  * FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
+  * after testing.
+  */
+ #define   GRC_CODE_SHIFT              23
+ #define   GRC_CODE_MASK                       (0x1FF << GRC_CODE_SHIFT)
+ #define   GRC_CODE_FAST_SHIFT         16
+ #define   GRC_CODE_FAST_MASK          (0x7F << GRC_CODE_FAST_SHIFT)
+ #define   GRC_CODE_SLOW_SHIFT         8
+ #define   GRC_CODE_SLOW_MASK          (0xFF << GRC_CODE_SLOW_SHIFT)
+ #define   GRC_CODE_NOM_MASK           0xFF
+ #define BXT_PORT_REF_DW6(phy)         _BXT_PHY((phy), _PORT_REF_DW6_BC,       \
+                                                     _PORT_REF_DW6_A)
+ #define _PORT_REF_DW8_A                       0x1621A0
+ #define _PORT_REF_DW8_BC              0x6C1A0
+ #define   GRC_DIS                     (1 << 15)
+ #define   GRC_RDY_OVRD                        (1 << 1)
+ #define BXT_PORT_REF_DW8(phy)         _BXT_PHY((phy), _PORT_REF_DW8_BC,       \
+                                                     _PORT_REF_DW8_A)
+ /* BXT PHY PCS registers */
+ #define _PORT_PCS_DW10_LN01_A         0x162428
+ #define _PORT_PCS_DW10_LN01_B         0x6C428
+ #define _PORT_PCS_DW10_LN01_C         0x6C828
+ #define _PORT_PCS_DW10_GRP_A          0x162C28
+ #define _PORT_PCS_DW10_GRP_B          0x6CC28
+ #define _PORT_PCS_DW10_GRP_C          0x6CE28
+ #define BXT_PORT_PCS_DW10_LN01(port)  _PORT3(port, _PORT_PCS_DW10_LN01_A, \
+                                                    _PORT_PCS_DW10_LN01_B, \
+                                                    _PORT_PCS_DW10_LN01_C)
+ #define BXT_PORT_PCS_DW10_GRP(port)   _PORT3(port, _PORT_PCS_DW10_GRP_A,  \
+                                                    _PORT_PCS_DW10_GRP_B,  \
+                                                    _PORT_PCS_DW10_GRP_C)
+ #define   TX2_SWING_CALC_INIT         (1 << 31)
+ #define   TX1_SWING_CALC_INIT         (1 << 30)
+ #define _PORT_PCS_DW12_LN01_A         0x162430
+ #define _PORT_PCS_DW12_LN01_B         0x6C430
+ #define _PORT_PCS_DW12_LN01_C         0x6C830
+ #define _PORT_PCS_DW12_LN23_A         0x162630
+ #define _PORT_PCS_DW12_LN23_B         0x6C630
+ #define _PORT_PCS_DW12_LN23_C         0x6CA30
+ #define _PORT_PCS_DW12_GRP_A          0x162c30
+ #define _PORT_PCS_DW12_GRP_B          0x6CC30
+ #define _PORT_PCS_DW12_GRP_C          0x6CE30
+ #define   LANESTAGGER_STRAP_OVRD      (1 << 6)
+ #define   LANE_STAGGER_MASK           0x1F
+ #define BXT_PORT_PCS_DW12_LN01(port)  _PORT3(port, _PORT_PCS_DW12_LN01_A, \
+                                                    _PORT_PCS_DW12_LN01_B, \
+                                                    _PORT_PCS_DW12_LN01_C)
+ #define BXT_PORT_PCS_DW12_LN23(port)  _PORT3(port, _PORT_PCS_DW12_LN23_A, \
+                                                    _PORT_PCS_DW12_LN23_B, \
+                                                    _PORT_PCS_DW12_LN23_C)
+ #define BXT_PORT_PCS_DW12_GRP(port)   _PORT3(port, _PORT_PCS_DW12_GRP_A, \
+                                                    _PORT_PCS_DW12_GRP_B, \
+                                                    _PORT_PCS_DW12_GRP_C)
+ /* BXT PHY TX registers */
+ #define _BXT_LANE_OFFSET(lane)           (((lane) >> 1) * 0x200 +     \
+                                         ((lane) & 1) * 0x80)
+ #define _PORT_TX_DW2_LN0_A            0x162508
+ #define _PORT_TX_DW2_LN0_B            0x6C508
+ #define _PORT_TX_DW2_LN0_C            0x6C908
+ #define _PORT_TX_DW2_GRP_A            0x162D08
+ #define _PORT_TX_DW2_GRP_B            0x6CD08
+ #define _PORT_TX_DW2_GRP_C            0x6CF08
+ #define BXT_PORT_TX_DW2_GRP(port)     _PORT3(port, _PORT_TX_DW2_GRP_A,  \
+                                                    _PORT_TX_DW2_GRP_B,  \
+                                                    _PORT_TX_DW2_GRP_C)
+ #define BXT_PORT_TX_DW2_LN0(port)     _PORT3(port, _PORT_TX_DW2_LN0_A,  \
+                                                    _PORT_TX_DW2_LN0_B,  \
+                                                    _PORT_TX_DW2_LN0_C)
+ #define   MARGIN_000_SHIFT            16
+ #define   MARGIN_000                  (0xFF << MARGIN_000_SHIFT)
+ #define   UNIQ_TRANS_SCALE_SHIFT      8
+ #define   UNIQ_TRANS_SCALE            (0xFF << UNIQ_TRANS_SCALE_SHIFT)
+ #define _PORT_TX_DW3_LN0_A            0x16250C
+ #define _PORT_TX_DW3_LN0_B            0x6C50C
+ #define _PORT_TX_DW3_LN0_C            0x6C90C
+ #define _PORT_TX_DW3_GRP_A            0x162D0C
+ #define _PORT_TX_DW3_GRP_B            0x6CD0C
+ #define _PORT_TX_DW3_GRP_C            0x6CF0C
+ #define BXT_PORT_TX_DW3_GRP(port)     _PORT3(port, _PORT_TX_DW3_GRP_A,  \
+                                                    _PORT_TX_DW3_GRP_B,  \
+                                                    _PORT_TX_DW3_GRP_C)
+ #define BXT_PORT_TX_DW3_LN0(port)     _PORT3(port, _PORT_TX_DW3_LN0_A,  \
+                                                    _PORT_TX_DW3_LN0_B,  \
+                                                    _PORT_TX_DW3_LN0_C)
+ #define   UNIQE_TRANGE_EN_METHOD      (1 << 27)
+ #define _PORT_TX_DW4_LN0_A            0x162510
+ #define _PORT_TX_DW4_LN0_B            0x6C510
+ #define _PORT_TX_DW4_LN0_C            0x6C910
+ #define _PORT_TX_DW4_GRP_A            0x162D10
+ #define _PORT_TX_DW4_GRP_B            0x6CD10
+ #define _PORT_TX_DW4_GRP_C            0x6CF10
+ #define BXT_PORT_TX_DW4_LN0(port)     _PORT3(port, _PORT_TX_DW4_LN0_A,  \
+                                                    _PORT_TX_DW4_LN0_B,  \
+                                                    _PORT_TX_DW4_LN0_C)
+ #define BXT_PORT_TX_DW4_GRP(port)     _PORT3(port, _PORT_TX_DW4_GRP_A,  \
+                                                    _PORT_TX_DW4_GRP_B,  \
+                                                    _PORT_TX_DW4_GRP_C)
+ #define   DEEMPH_SHIFT                        24
+ #define   DE_EMPHASIS                 (0xFF << DEEMPH_SHIFT)
+ #define _PORT_TX_DW14_LN0_A           0x162538
+ #define _PORT_TX_DW14_LN0_B           0x6C538
+ #define _PORT_TX_DW14_LN0_C           0x6C938
+ #define   LATENCY_OPTIM_SHIFT         30
+ #define   LATENCY_OPTIM                       (1 << LATENCY_OPTIM_SHIFT)
+ #define BXT_PORT_TX_DW14_LN(port, lane)       (_PORT3((port), _PORT_TX_DW14_LN0_A,   \
+                                                       _PORT_TX_DW14_LN0_B,   \
+                                                       _PORT_TX_DW14_LN0_C) + \
+                                        _BXT_LANE_OFFSET(lane))
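
_BXT_LANE_OFFSET() places the four lanes as two pairs 0x200 apart, with the two lanes of a pair 0x80 apart:

/* ((lane) >> 1) * 0x200 + ((lane) & 1) * 0x80 works out to:
 *   lane 0 -> +0x000    lane 1 -> +0x080
 *   lane 2 -> +0x200    lane 3 -> +0x280
 * so BXT_PORT_TX_DW14_LN(port, lane) adds this to the LN0 address.
 */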
  /*
   * Fence registers
   */
  /* control register for cpu gtt access */
  #define TILECTL                               0x101000
  #define   TILECTL_SWZCTL                      (1 << 0)
+ #define   TILECTL_TLBPF                       (1 << 1)
  #define   TILECTL_TLB_PREFETCH_DIS    (1 << 2)
  #define   TILECTL_BACKSNOOP_DIS               (1 << 3)
  
  #define   GEN9_F2_SS_DIS_SHIFT                20
  #define   GEN9_F2_SS_DIS_MASK         (0xf << GEN9_F2_SS_DIS_SHIFT)
  
- #define GEN8_EU_DISABLE0              0x9134
- #define GEN8_EU_DISABLE1              0x9138
- #define GEN8_EU_DISABLE2              0x913c
+ #define GEN9_EU_DISABLE(slice)                (0x9134 + (slice)*0x4)
  
  #define GEN6_BSD_SLEEP_PSMI_CONTROL   0x12050
  #define   GEN6_BSD_SLEEP_MSG_DISABLE  (1 << 0)
  #define   GMBUS_RATE_400KHZ   (2<<8) /* reserved on Pineview */
  #define   GMBUS_RATE_1MHZ     (3<<8) /* reserved on Pineview */
  #define   GMBUS_HOLD_EXT      (1<<7) /* 300ns hold time, rsvd on Pineview */
- #define   GMBUS_PORT_DISABLED 0
- #define   GMBUS_PORT_SSC      1
- #define   GMBUS_PORT_VGADDC   2
- #define   GMBUS_PORT_PANEL    3
- #define   GMBUS_PORT_DPD_CHV  3 /* HDMID_CHV */
- #define   GMBUS_PORT_DPC      4 /* HDMIC */
- #define   GMBUS_PORT_DPB      5 /* SDVO, HDMIB */
- #define   GMBUS_PORT_DPD      6 /* HDMID */
- #define   GMBUS_PORT_RESERVED 7 /* 7 reserved */
- #define   GMBUS_NUM_PORTS     (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
+ #define   GMBUS_PIN_DISABLED  0
+ #define   GMBUS_PIN_SSC               1
+ #define   GMBUS_PIN_VGADDC    2
+ #define   GMBUS_PIN_PANEL     3
+ #define   GMBUS_PIN_DPD_CHV   3 /* HDMID_CHV */
+ #define   GMBUS_PIN_DPC               4 /* HDMIC */
+ #define   GMBUS_PIN_DPB               5 /* SDVO, HDMIB */
+ #define   GMBUS_PIN_DPD               6 /* HDMID */
+ #define   GMBUS_PIN_RESERVED  7 /* 7 reserved */
+ #define   GMBUS_PIN_1_BXT     1
+ #define   GMBUS_PIN_2_BXT     2
+ #define   GMBUS_PIN_3_BXT     3
+ #define   GMBUS_NUM_PINS      7 /* including 0 */
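
The rename from GMBUS_PORT_* to GMBUS_PIN_* reflects that these index GMBUS pin pairs rather than display ports, and BXT reuses indices 1-3 with its own meaning, so any validity check has to become platform-aware. A sketch of such a check (helper name hypothetical, not part of this patch):

/* Hypothetical helper: which pin numbers are usable on this platform. */
static bool gmbus_pin_is_valid(struct drm_i915_private *dev_priv,
			       unsigned int pin)
{
	if (IS_BROXTON(dev_priv))
		return pin >= GMBUS_PIN_1_BXT && pin <= GMBUS_PIN_3_BXT;

	return pin > GMBUS_PIN_DISABLED && pin <= GMBUS_PIN_DPD;
}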
  #define GMBUS1                        0x5104 /* command/status */
  #define   GMBUS_SW_CLR_INT    (1<<31)
  #define   GMBUS_SW_RDY                (1<<30)
  #define   GMBUS_CYCLE_INDEX   (2<<25)
  #define   GMBUS_CYCLE_STOP    (4<<25)
  #define   GMBUS_BYTE_COUNT_SHIFT 16
 +#define   GMBUS_BYTE_COUNT_MAX   256U
  #define   GMBUS_SLAVE_INDEX_SHIFT 8
  #define   GMBUS_SLAVE_ADDR_SHIFT 1
  #define   GMBUS_SLAVE_READ    (1<<0)
  #define EDP_PSR_CTL(dev)                      (EDP_PSR_BASE(dev) + 0)
  #define   EDP_PSR_ENABLE                      (1<<31)
  #define   BDW_PSR_SINGLE_FRAME                        (1<<30)
- #define   EDP_PSR_LINK_DISABLE                        (0<<27)
  #define   EDP_PSR_LINK_STANDBY                        (1<<27)
  #define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK    (3<<25)
  #define   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
  #define   EDP_PSR_DEBUG_MASK_MEMUP    (1<<26)
  #define   EDP_PSR_DEBUG_MASK_HPD      (1<<25)
  
+ #define EDP_PSR2_CTL                  0x6f900
+ #define   EDP_PSR2_ENABLE             (1<<31)
+ #define   EDP_SU_TRACK_ENABLE         (1<<30)
+ #define   EDP_MAX_SU_DISABLE_TIME(t)  ((t)<<20)
+ #define   EDP_MAX_SU_DISABLE_TIME_MASK        (0x1f<<20)
+ #define   EDP_PSR2_TP2_TIME_500               (0<<8)
+ #define   EDP_PSR2_TP2_TIME_100               (1<<8)
+ #define   EDP_PSR2_TP2_TIME_2500      (2<<8)
+ #define   EDP_PSR2_TP2_TIME_50                (3<<8)
+ #define   EDP_PSR2_TP2_TIME_MASK      (3<<8)
+ #define   EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
+ #define   EDP_PSR2_FRAME_BEFORE_SU_MASK       (0xf<<4)
+ #define   EDP_PSR2_IDLE_MASK          0xf
+
  /* VGA port control */
  #define ADPA                  0x61100
  #define PCH_ADPA                0xe1100
  #define   PLANE_CTL_ALPHA_HW_PREMULTIPLY      (  3 << 4)
  #define   PLANE_CTL_ROTATE_MASK                       0x3
  #define   PLANE_CTL_ROTATE_0                  0x0
+ #define   PLANE_CTL_ROTATE_90                 0x1
  #define   PLANE_CTL_ROTATE_180                        0x2
+ #define   PLANE_CTL_ROTATE_270                        0x3
  #define _PLANE_STRIDE_1_A                     0x70188
  #define _PLANE_STRIDE_2_A                     0x70288
  #define _PLANE_STRIDE_3_A                     0x70388
  #define PS_WIN_SZ(pipe)               _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
  #define PS_WIN_POS(pipe)      _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
  
+ /*
+  * Skylake scalers
+  */
+ #define _PS_1A_CTRL      0x68180
+ #define _PS_2A_CTRL      0x68280
+ #define _PS_1B_CTRL      0x68980
+ #define _PS_2B_CTRL      0x68A80
+ #define _PS_1C_CTRL      0x69180
+ #define PS_SCALER_EN        (1 << 31)
+ #define PS_SCALER_MODE_MASK (3 << 28)
+ #define PS_SCALER_MODE_DYN  (0 << 28)
+ #define PS_SCALER_MODE_HQ  (1 << 28)
+ #define PS_PLANE_SEL_MASK  (7 << 25)
+ #define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
+ #define PS_FILTER_MASK         (3 << 23)
+ #define PS_FILTER_MEDIUM       (0 << 23)
+ #define PS_FILTER_EDGE_ENHANCE (2 << 23)
+ #define PS_FILTER_BILINEAR     (3 << 23)
+ #define PS_VERT3TAP            (1 << 21)
+ #define PS_VERT_INT_INVERT_FIELD1 (0 << 20)
+ #define PS_VERT_INT_INVERT_FIELD0 (1 << 20)
+ #define PS_PWRUP_PROGRESS         (1 << 17)
+ #define PS_V_FILTER_BYPASS        (1 << 8)
+ #define PS_VADAPT_EN              (1 << 7)
+ #define PS_VADAPT_MODE_MASK        (3 << 5)
+ #define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
+ #define PS_VADAPT_MODE_MOD_ADAPT   (1 << 5)
+ #define PS_VADAPT_MODE_MOST_ADAPT  (3 << 5)
+ #define _PS_PWR_GATE_1A     0x68160
+ #define _PS_PWR_GATE_2A     0x68260
+ #define _PS_PWR_GATE_1B     0x68960
+ #define _PS_PWR_GATE_2B     0x68A60
+ #define _PS_PWR_GATE_1C     0x69160
+ #define PS_PWR_GATE_DIS_OVERRIDE       (1 << 31)
+ #define PS_PWR_GATE_SETTLING_TIME_32   (0 << 3)
+ #define PS_PWR_GATE_SETTLING_TIME_64   (1 << 3)
+ #define PS_PWR_GATE_SETTLING_TIME_96   (2 << 3)
+ #define PS_PWR_GATE_SETTLING_TIME_128  (3 << 3)
+ #define PS_PWR_GATE_SLPEN_8             0
+ #define PS_PWR_GATE_SLPEN_16            1
+ #define PS_PWR_GATE_SLPEN_24            2
+ #define PS_PWR_GATE_SLPEN_32            3
+ #define _PS_WIN_POS_1A      0x68170
+ #define _PS_WIN_POS_2A      0x68270
+ #define _PS_WIN_POS_1B      0x68970
+ #define _PS_WIN_POS_2B      0x68A70
+ #define _PS_WIN_POS_1C      0x69170
+ #define _PS_WIN_SZ_1A       0x68174
+ #define _PS_WIN_SZ_2A       0x68274
+ #define _PS_WIN_SZ_1B       0x68974
+ #define _PS_WIN_SZ_2B       0x68A74
+ #define _PS_WIN_SZ_1C       0x69174
+ #define _PS_VSCALE_1A       0x68184
+ #define _PS_VSCALE_2A       0x68284
+ #define _PS_VSCALE_1B       0x68984
+ #define _PS_VSCALE_2B       0x68A84
+ #define _PS_VSCALE_1C       0x69184
+ #define _PS_HSCALE_1A       0x68190
+ #define _PS_HSCALE_2A       0x68290
+ #define _PS_HSCALE_1B       0x68990
+ #define _PS_HSCALE_2B       0x68A90
+ #define _PS_HSCALE_1C       0x69190
+ #define _PS_VPHASE_1A       0x68188
+ #define _PS_VPHASE_2A       0x68288
+ #define _PS_VPHASE_1B       0x68988
+ #define _PS_VPHASE_2B       0x68A88
+ #define _PS_VPHASE_1C       0x69188
+ #define _PS_HPHASE_1A       0x68194
+ #define _PS_HPHASE_2A       0x68294
+ #define _PS_HPHASE_1B       0x68994
+ #define _PS_HPHASE_2B       0x68A94
+ #define _PS_HPHASE_1C       0x69194
+ #define _PS_ECC_STAT_1A     0x681D0
+ #define _PS_ECC_STAT_2A     0x682D0
+ #define _PS_ECC_STAT_1B     0x689D0
+ #define _PS_ECC_STAT_2B     0x68AD0
+ #define _PS_ECC_STAT_1C     0x691D0
+ #define _ID(id, a, b) ((a) + (id)*((b)-(a)))
+ #define SKL_PS_CTRL(pipe, id) _PIPE(pipe,        \
+                       _ID(id, _PS_1A_CTRL, _PS_2A_CTRL),       \
+                       _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
+ #define SKL_PS_PWR_GATE(pipe, id) _PIPE(pipe,    \
+                       _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
+                       _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
+ #define SKL_PS_WIN_POS(pipe, id) _PIPE(pipe,     \
+                       _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
+                       _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
+ #define SKL_PS_WIN_SZ(pipe, id)  _PIPE(pipe,     \
+                       _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A),   \
+                       _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
+ #define SKL_PS_VSCALE(pipe, id)  _PIPE(pipe,     \
+                       _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A),   \
+                       _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
+ #define SKL_PS_HSCALE(pipe, id)  _PIPE(pipe,     \
+                       _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A),   \
+                       _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
+ #define SKL_PS_VPHASE(pipe, id)  _PIPE(pipe,     \
+                       _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A),   \
+                       _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
+ #define SKL_PS_HPHASE(pipe, id)  _PIPE(pipe,     \
+                       _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A),   \
+                       _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
+ #define SKL_PS_ECC_STAT(pipe, id)  _PIPE(pipe,     \
+                       _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A),   \
+                       _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
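
_ID() applies the familiar stride trick to the scaler instance, and _PIPE() applies it again to the pipe, so the SKL_PS_* macros resolve to the per-pipe, per-scaler instances defined above; for example:

/* _ID(id, a, b) == (a) + (id) * ((b) - (a)), so:
 *   SKL_PS_CTRL(PIPE_A, 0) -> _PS_1A_CTRL (0x68180)
 *   SKL_PS_CTRL(PIPE_B, 1) -> _PS_2B_CTRL (0x68A80)
 * The 0x800 pipe stride also lands scaler 0 of pipe C on
 * _PS_1C_CTRL (0x69180).
 */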
  /* legacy palette */
  #define _LGC_PALETTE_A           0x4a000
  #define _LGC_PALETTE_B           0x4a800
  #define  GEN8_PIPE_VSYNC              (1 << 1)
  #define  GEN8_PIPE_VBLANK             (1 << 0)
  #define  GEN9_PIPE_CURSOR_FAULT               (1 << 11)
+ #define  GEN9_PIPE_PLANE4_FAULT               (1 << 10)
  #define  GEN9_PIPE_PLANE3_FAULT               (1 << 9)
  #define  GEN9_PIPE_PLANE2_FAULT               (1 << 8)
  #define  GEN9_PIPE_PLANE1_FAULT               (1 << 7)
+ #define  GEN9_PIPE_PLANE4_FLIP_DONE   (1 << 6)
  #define  GEN9_PIPE_PLANE3_FLIP_DONE   (1 << 5)
  #define  GEN9_PIPE_PLANE2_FLIP_DONE   (1 << 4)
  #define  GEN9_PIPE_PLANE1_FLIP_DONE   (1 << 3)
         GEN8_PIPE_PRIMARY_FAULT)
  #define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
        (GEN9_PIPE_CURSOR_FAULT | \
+        GEN9_PIPE_PLANE4_FAULT | \
         GEN9_PIPE_PLANE3_FAULT | \
         GEN9_PIPE_PLANE2_FAULT | \
         GEN9_PIPE_PLANE1_FAULT)
  #define GEN8_DE_PORT_IMR 0x44444
  #define GEN8_DE_PORT_IIR 0x44448
  #define GEN8_DE_PORT_IER 0x4444c
- #define  GEN8_PORT_DP_A_HOTPLUG               (1 << 3)
  #define  GEN9_AUX_CHANNEL_D           (1 << 27)
  #define  GEN9_AUX_CHANNEL_C           (1 << 26)
  #define  GEN9_AUX_CHANNEL_B           (1 << 25)
+ #define  BXT_DE_PORT_HP_DDIC          (1 << 5)
+ #define  BXT_DE_PORT_HP_DDIB          (1 << 4)
+ #define  BXT_DE_PORT_HP_DDIA          (1 << 3)
+ #define  BXT_DE_PORT_HOTPLUG_MASK     (BXT_DE_PORT_HP_DDIA | \
+                                        BXT_DE_PORT_HP_DDIB | \
+                                        BXT_DE_PORT_HP_DDIC)
+ #define  GEN8_PORT_DP_A_HOTPLUG               (1 << 3)
+ #define  BXT_DE_PORT_GMBUS            (1 << 1)
  #define  GEN8_AUX_CHANNEL_A           (1 << 0)
  
  #define GEN8_DE_MISC_ISR 0x44460
  #define GEN8_PCU_IIR 0x444e8
  #define GEN8_PCU_IER 0x444ec
  
+ /* BXT hotplug control */
+ #define BXT_HOTPLUG_CTL                       0xC4030
+ #define   BXT_DDIA_HPD_ENABLE         (1 << 28)
+ #define   BXT_DDIA_HPD_STATUS         (3 << 24)
+ #define   BXT_DDIC_HPD_ENABLE         (1 << 12)
+ #define   BXT_DDIC_HPD_STATUS         (3 << 8)
+ #define   BXT_DDIB_HPD_ENABLE         (1 << 4)
+ #define   BXT_DDIB_HPD_STATUS         (3 << 0)
+ #define   BXT_HOTPLUG_CTL_MASK                (BXT_DDIA_HPD_ENABLE | \
+                                        BXT_DDIB_HPD_ENABLE | \
+                                        BXT_DDIC_HPD_ENABLE)
+ #define   BXT_HPD_STATUS_MASK         (BXT_DDIA_HPD_STATUS | \
+                                        BXT_DDIB_HPD_STATUS | \
+                                        BXT_DDIC_HPD_STATUS)
+
  #define ILK_DISPLAY_CHICKEN2  0x42004
  /* Required on all Ironlake and Sandybridge according to the B-Spec. */
  #define  ILK_ELPIN_409_SELECT (1 << 25)
  #define  DISP_FBC_WM_DIS              (1<<15)
  #define DISP_ARB_CTL2 0x45004
  #define  DISP_DATA_PARTITION_5_6      (1<<6)
+ #define DBUF_CTL      0x45008
+ #define  DBUF_POWER_REQUEST           (1<<31)
+ #define  DBUF_POWER_STATE             (1<<30)
  #define GEN7_MSG_CTL  0x45010
  #define  WAIT_FOR_PCH_RESET_ACK               (1<<1)
  #define  WAIT_FOR_PCH_FLR_ACK         (1<<0)
  #define GEN7_L3SQCREG1                                0xB010
  #define  VLV_B0_WA_L3SQCREG1_VALUE            0x00D30000
  
+ #define GEN8_L3SQCREG1                                0xB100
+ #define  BDW_WA_L3SQCREG1_DEFAULT             0x784000
  #define GEN7_L3CNTLREG1                               0xB01C
  #define  GEN7_WA_FOR_GEN7_L3_CONTROL                  0x3C47FF8C
  #define  GEN7_L3AGDIS                         (1<<19)
  #define  HDC_FORCE_NON_COHERENT                       (1<<4)
  #define  HDC_BARRIER_PERFORMANCE_DISABLE      (1<<10)
  
+ /* GEN9 chicken */
+ #define SLICE_ECO_CHICKEN0                    0x7308
+ #define   PIXEL_MASK_CAMMING_DISABLE          (1 << 14)
+
  /* WaCatErrorRejectionIssue */
  #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG                0x9030
  #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB     (1<<11)
  #define  GTFIFOCTL                            0x120008
  #define    GT_FIFO_FREE_ENTRIES_MASK          0x7f
  #define    GT_FIFO_NUM_RESERVED_ENTRIES               20
 +#define    GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
 +#define    GT_FIFO_CTL_RC6_POLICY_STALL               (1 << 11)
  
  #define  HSW_IDICR                            0x9008
  #define    IDIHASHMSK(x)                      (((x) & 0x3f) << 16)
  # define GEN6_CSUNIT_CLOCK_GATE_DISABLE                       (1 << 7)
  
  #define GEN6_UCGCTL2                          0x9404
+ # define GEN6_VFUNIT_CLOCK_GATE_DISABLE                       (1 << 31)
  # define GEN7_VDSUNIT_CLOCK_GATE_DISABLE              (1 << 30)
  # define GEN7_TDLUNIT_CLOCK_GATE_DISABLE              (1 << 22)
  # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE              (1 << 13)
  #define GEN8_UCGCTL6                          0x9430
  #define   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE    (1<<24)
  #define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE     (1<<14)
+ #define   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
  
  #define GEN6_GFXPAUSE                         0xA000
  #define GEN6_RPNSWREQ                         0xA008
  #define GEN9_MEDIA_PG_IDLE_HYSTERESIS         0xA0C4
  #define GEN9_RENDER_PG_IDLE_HYSTERESIS                0xA0C8
  #define GEN9_PG_ENABLE                                0xA210
+ #define GEN9_RENDER_PG_ENABLE                 (1<<0)
+ #define GEN9_MEDIA_PG_ENABLE                  (1<<1)
  
  #define VLV_CHICKEN_3                         (VLV_DISPLAY_BASE + 0x7040C)
  #define  PIXEL_OVERLAP_CNT_MASK                       (3 << 30)
  #define   GEN6_PCODE_WRITE_D_COMP             0x11
  #define   GEN6_ENCODE_RC6_VID(mv)             (((mv) - 245) / 5)
  #define   GEN6_DECODE_RC6_VID(vids)           (((vids) * 5) + 245)
+ #define   HSW_PCODE_DE_WRITE_FREQ_REQ         0x17
  #define   DISPLAY_IPS_CONTROL                 0x19
  #define         HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL  0x1A
  #define GEN6_PCODE_DATA                               0x138128
  #define CHV_POWER_SS1_SIG2            0xa72c
  #define   CHV_EU311_PG_ENABLE         (1<<1)
  
- #define GEN9_SLICE0_PGCTL_ACK         0x804c
- #define GEN9_SLICE1_PGCTL_ACK         0x8050
- #define GEN9_SLICE2_PGCTL_ACK         0x8054
+ #define GEN9_SLICE_PGCTL_ACK(slice)   (0x804c + (slice)*0x4)
  #define   GEN9_PGCTL_SLICE_ACK                (1 << 0)
+ #define   GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
  
- #define GEN9_SLICE0_SS01_EU_PGCTL_ACK 0x805c
- #define GEN9_SLICE0_SS23_EU_PGCTL_ACK 0x8060
- #define GEN9_SLICE1_SS01_EU_PGCTL_ACK 0x8064
- #define GEN9_SLICE1_SS23_EU_PGCTL_ACK 0x8068
- #define GEN9_SLICE2_SS01_EU_PGCTL_ACK 0x806c
- #define GEN9_SLICE2_SS23_EU_PGCTL_ACK 0x8070
+ #define GEN9_SS01_EU_PGCTL_ACK(slice) (0x805c + (slice)*0x8)
+ #define GEN9_SS23_EU_PGCTL_ACK(slice) (0x8060 + (slice)*0x8)
  #define   GEN9_PGCTL_SSA_EU08_ACK     (1 << 0)
  #define   GEN9_PGCTL_SSA_EU19_ACK     (1 << 2)
  #define   GEN9_PGCTL_SSA_EU210_ACK    (1 << 4)
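
The parameterized forms reproduce the removed per-slice addresses exactly:

/* 0x804c + slice * 4, and 0x805c / 0x8060 + slice * 8:
 *   GEN9_SLICE_PGCTL_ACK(1)   == 0x8050 (was GEN9_SLICE1_PGCTL_ACK)
 *   GEN9_SS01_EU_PGCTL_ACK(2) == 0x806c (was GEN9_SLICE2_SS01_EU_PGCTL_ACK)
 *   GEN9_SS23_EU_PGCTL_ACK(0) == 0x8060 (was GEN9_SLICE0_SS23_EU_PGCTL_ACK)
 */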
  #define  CDCLK_FREQ_675_617           (3<<26)
  #define  CDCLK_FREQ_DECIMAL_MASK      (0x7ff)
  
+ #define  BXT_CDCLK_CD2X_DIV_SEL_MASK  (3<<22)
+ #define  BXT_CDCLK_CD2X_DIV_SEL_1     (0<<22)
+ #define  BXT_CDCLK_CD2X_DIV_SEL_1_5   (1<<22)
+ #define  BXT_CDCLK_CD2X_DIV_SEL_2     (2<<22)
+ #define  BXT_CDCLK_CD2X_DIV_SEL_4     (3<<22)
+ #define  BXT_CDCLK_SSA_PRECHARGE_ENABLE       (1<<16)
  /* LCPLL_CTL */
  #define LCPLL1_CTL            0x46010
  #define LCPLL2_CTL            0x46014
  #define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8)
  #define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8)
  
+ /* BXT display engine PLL */
+ #define BXT_DE_PLL_CTL                        0x6d000
+ #define   BXT_DE_PLL_RATIO(x)         (x)     /* {60,65,100} * 19.2MHz */
+ #define   BXT_DE_PLL_RATIO_MASK               0xff
+ #define BXT_DE_PLL_ENABLE             0x46070
+ #define   BXT_DE_PLL_PLL_ENABLE               (1 << 31)
+ #define   BXT_DE_PLL_LOCK             (1 << 30)
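
With the 19.2 MHz reference, the three documented ratios give DE PLL frequencies of 1152, 1248 and 1920 MHz, and the CD2X divider then derives cdclk from the PLL output. A worked table, assuming cdclk = (ratio * 19.2 MHz) / 2 / CD2X divider (the /2 placement is an inference from the divider encodings above, not stated in this patch):

/* Assumed relation: cdclk = ratio * 19.2 MHz / 2 / CD2X divider:
 *   ratio 65, div 1   -> 624 MHz
 *   ratio 60, div 1   -> 576 MHz
 *   ratio 60, div 1.5 -> 384 MHz
 *   ratio 60, div 2   -> 288 MHz
 *   ratio 60, div 4   -> 144 MHz
 */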
+ /* GEN9 DC */
+ #define DC_STATE_EN                   0x45504
+ #define  DC_STATE_EN_UPTO_DC5         (1<<0)
+ #define  DC_STATE_EN_DC9              (1<<3)
+
  /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
   * since on HSW we can't write to it using I915_WRITE. */
  #define D_COMP_HSW                    (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
@@@ -12,6 -12,7 +12,6 @@@
  
  #undef TRACE_SYSTEM
  #define TRACE_SYSTEM i915
 -#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
  #define TRACE_INCLUDE_FILE i915_trace
  
  /* pipe updates */
@@@ -220,7 -221,7 +220,7 @@@ DEFINE_EVENT(i915_page_table_entry, i91
  
  DECLARE_EVENT_CLASS(i915_page_table_entry_update,
        TP_PROTO(struct i915_address_space *vm, u32 pde,
-                struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+                struct i915_page_table *pt, u32 first, u32 count, u32 bits),
        TP_ARGS(vm, pde, pt, first, count, bits),
  
        TP_STRUCT__entry(
  
  DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
        TP_PROTO(struct i915_address_space *vm, u32 pde,
-                struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+                struct i915_page_table *pt, u32 first, u32 count, u32 bits),
        TP_ARGS(vm, pde, pt, first, count, bits)
  );
  
@@@ -504,7 -505,6 +504,6 @@@ DECLARE_EVENT_CLASS(i915_gem_request
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, ring)
-                            __field(u32, uniq)
                             __field(u32, seqno)
                             ),
  
                                                i915_gem_request_get_ring(req);
                           __entry->dev = ring->dev->primary->index;
                           __entry->ring = ring->id;
-                          __entry->uniq = req ? req->uniq : 0;
                           __entry->seqno = i915_gem_request_get_seqno(req);
                           ),
  
-           TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
-                     __entry->dev, __entry->ring, __entry->uniq,
-                     __entry->seqno)
+           TP_printk("dev=%u, ring=%u, seqno=%u",
+                     __entry->dev, __entry->ring, __entry->seqno)
  );
  
  DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
@@@ -564,7 -562,6 +561,6 @@@ TRACE_EVENT(i915_gem_request_wait_begin
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, ring)
-                            __field(u32, uniq)
                             __field(u32, seqno)
                             __field(bool, blocking)
                             ),
                                                i915_gem_request_get_ring(req);
                           __entry->dev = ring->dev->primary->index;
                           __entry->ring = ring->id;
-                          __entry->uniq = req ? req->uniq : 0;
                           __entry->seqno = i915_gem_request_get_seqno(req);
                           __entry->blocking =
                                     mutex_is_locked(&ring->dev->struct_mutex);
                           ),
  
-           TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
-                     __entry->dev, __entry->ring, __entry->uniq,
+           TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
+                     __entry->dev, __entry->ring,
                      __entry->seqno, __entry->blocking ?  "yes (NB)" : "no")
  );
  
@@@ -596,33 -592,6 +591,6 @@@ DEFINE_EVENT(i915_gem_request, i915_gem
            TP_ARGS(req)
  );
  
- DECLARE_EVENT_CLASS(i915_ring,
-           TP_PROTO(struct intel_engine_cs *ring),
-           TP_ARGS(ring),
-           TP_STRUCT__entry(
-                            __field(u32, dev)
-                            __field(u32, ring)
-                            ),
-           TP_fast_assign(
-                          __entry->dev = ring->dev->primary->index;
-                          __entry->ring = ring->id;
-                          ),
-           TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
- );
- DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-           TP_PROTO(struct intel_engine_cs *ring),
-           TP_ARGS(ring)
- );
- DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-           TP_PROTO(struct intel_engine_cs *ring),
-           TP_ARGS(ring)
- );
  TRACE_EVENT(i915_flip_request,
            TP_PROTO(int plane, struct drm_i915_gem_object *obj),
  
@@@ -162,6 -162,30 +162,30 @@@ static int intel_plane_atomic_check(str
                        (1 << drm_plane_index(plane));
        }
  
+       if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+               if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+                       state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
+                       DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+                       return -EINVAL;
+               }
+               /*
+                * 90/270 is not allowed with RGB64 16:16:16:16,
+                * RGB 16-bit 5:6:5, and Indexed 8-bit.
+                * TBD: add the RGB64 case once it's added to the supported format list.
+                */
+               switch (state->fb->pixel_format) {
+               case DRM_FORMAT_C8:
+               case DRM_FORMAT_RGB565:
+                       DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+                                       drm_get_format_name(state->fb->pixel_format));
+                       return -EINVAL;
+               default:
+                       break;
+               }
+       }
        return intel_plane->check_plane(plane, intel_state);
  }
  
@@@ -172,6 -196,10 +196,6 @@@ static void intel_plane_atomic_update(s
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane->state);
  
 -      /* Don't disable an already disabled plane */
 -      if (!plane->state->fb && !old_state->fb)
 -              return;
 -
        intel_plane->commit_plane(plane, intel_state);
  }
  
@@@ -103,6 -103,10 +103,10 @@@ static void chv_prepare_pll(struct inte
                            const struct intel_crtc_state *pipe_config);
  static void intel_begin_crtc_commit(struct drm_crtc *crtc);
  static void intel_finish_crtc_commit(struct drm_crtc *crtc);
+ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
+       struct intel_crtc_state *crtc_state);
+ static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
+                          int num_connectors);
  
  static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
  {
@@@ -400,6 -404,18 +404,18 @@@ static const intel_limit_t intel_limits
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
  };
  
+ static const intel_limit_t intel_limits_bxt = {
+       /* FIXME: find real dot limits */
+       .dot = { .min = 0, .max = INT_MAX },
+       .vco = { .min = 4800000, .max = 6480000 },
+       .n = { .min = 1, .max = 1 },
+       .m1 = { .min = 2, .max = 2 },
+       /* FIXME: find real m2 limits */
+       .m2 = { .min = 2 << 22, .max = 255 << 22 },
+       .p1 = { .min = 2, .max = 4 },
+       .p2 = { .p2_slow = 1, .p2_fast = 20 },
+ };
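
Unlike the integer m2 on older platforms, BXT's m2 carries 22 fractional bits (cf. PORT_PLL_M2_FRAC_MASK), hence the `<< 22` in the limits above. A sketch of the dot-clock math these limits feed, following the CHV-style formula (shape assumed, not lifted from this patch):

/* Sketch (CHV-style math, m2 in .22 fixed point): */
static int bxt_calc_dot_khz(int refclk_khz, const intel_clock_t *clock)
{
	u64 vco_khz = div_u64((u64)refclk_khz * clock->m1 * clock->m2,
			      clock->n << 22);

	return (int)div_u64(vco_khz, clock->p1 * clock->p2); /* dot, kHz */
}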
  static void vlv_clock(int refclk, intel_clock_t *clock)
  {
        clock->m = clock->m1 * clock->m2;
@@@ -511,7 -527,9 +527,9 @@@ intel_limit(struct intel_crtc_state *cr
        struct drm_device *dev = crtc_state->base.crtc->dev;
        const intel_limit_t *limit;
  
-       if (HAS_PCH_SPLIT(dev))
+       if (IS_BROXTON(dev))
+               limit = &intel_limits_bxt;
+       else if (HAS_PCH_SPLIT(dev))
                limit = intel_ironlake_limit(crtc_state, refclk);
        else if (IS_G4X(dev)) {
                limit = intel_g4x_limit(crtc_state);
@@@ -596,11 -614,11 +614,11 @@@ static bool intel_PLL_is_valid(struct d
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");
  
-       if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
+       if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");
  
-       if (!IS_VALLEYVIEW(dev)) {
+       if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
@@@ -953,6 -971,15 +971,15 @@@ chv_find_best_dpll(const intel_limit_t 
        return found;
  }
  
+ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+                       intel_clock_t *best_clock)
+ {
+       int refclk = i9xx_get_refclk(crtc_state, 0);
+       return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
+                                 target_clock, refclk, NULL, best_clock);
+ }
+
  bool intel_crtc_active(struct drm_crtc *crtc)
  {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@@ -2104,7 -2131,7 +2131,7 @@@ static void intel_enable_pipe(struct in
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
-       if (!HAS_PCH_SPLIT(dev_priv->dev))
+       if (HAS_GMCH_DISPLAY(dev_priv->dev))
                if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
@@@ -2338,13 -2365,6 +2365,6 @@@ intel_fill_fb_ggtt_view(struct i915_ggt
        info->pitch = fb->pitches[0];
        info->fb_modifier = fb->modifier[0];
  
-       if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED ||
-             info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) {
-               DRM_DEBUG_KMS(
-                             "Y or Yf tiling is needed for 90/270 rotation!\n");
-               return -EINVAL;
-       }
        return 0;
  }
  
@@@ -2936,6 -2956,35 +2956,35 @@@ unsigned long intel_plane_obj_offset(st
        return i915_gem_obj_ggtt_offset_view(obj, view);
  }
  
+ /*
+  * This function detaches (i.e. unbinds) unused scalers in hardware.
+  */
+ void skl_detach_scalers(struct intel_crtc *intel_crtc)
+ {
+       struct drm_device *dev;
+       struct drm_i915_private *dev_priv;
+       struct intel_crtc_scaler_state *scaler_state;
+       int i;
+       if (!intel_crtc || !intel_crtc->config)
+               return;
+       dev = intel_crtc->base.dev;
+       dev_priv = dev->dev_private;
+       scaler_state = &intel_crtc->config->scaler_state;
+       /* loop through and disable scalers that aren't in use */
+       for (i = 0; i < intel_crtc->num_scalers; i++) {
+               if (!scaler_state->scalers[i].in_use) {
+                       I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0);
+                       I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0);
+                       I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0);
+                       DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
+                               intel_crtc->base.base.id, intel_crtc->pipe, i);
+               }
+       }
+ }
+
  static void skylake_update_primary_plane(struct drm_crtc *crtc,
                                         struct drm_framebuffer *fb,
                                         int x, int y)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_gem_object *obj;
        int pipe = intel_crtc->pipe;
-       u32 plane_ctl, stride_div;
+       u32 plane_ctl, stride_div, stride;
+       u32 tile_height, plane_offset, plane_size;
+       unsigned int rotation;
+       int x_offset, y_offset;
        unsigned long surf_addr;
+       struct drm_plane *plane;
  
        if (!intel_crtc->primary_enabled) {
                I915_WRITE(PLANE_CTL(pipe, 0), 0);
        }
  
        plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-       if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
+       plane = crtc->primary;
+       rotation = plane->state->rotation;
+       switch (rotation) {
+       case BIT(DRM_ROTATE_90):
+               plane_ctl |= PLANE_CTL_ROTATE_90;
+               break;
+       case BIT(DRM_ROTATE_180):
                plane_ctl |= PLANE_CTL_ROTATE_180;
+               break;
+       case BIT(DRM_ROTATE_270):
+               plane_ctl |= PLANE_CTL_ROTATE_270;
+               break;
+       }
  
        obj = intel_fb_obj(fb);
        stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
                                               fb->pixel_format);
-       surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj);
+       surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj);
+       if (intel_rotation_90_or_270(rotation)) {
+               /* stride = Surface height in tiles */
+               tile_height = intel_tile_height(dev, fb->bits_per_pixel,
+                                               fb->modifier[0]);
+               stride = DIV_ROUND_UP(fb->height, tile_height);
+               x_offset = stride * tile_height - y - (plane->state->src_h >> 16);
+               y_offset = x;
+               plane_size = ((plane->state->src_w >> 16) - 1) << 16 |
+                                       ((plane->state->src_h >> 16) - 1);
+       } else {
+               stride = fb->pitches[0] / stride_div;
+               x_offset = x;
+               y_offset = y;
+               plane_size = ((plane->state->src_h >> 16) - 1) << 16 |
+                       ((plane->state->src_w >> 16) - 1);
+       }
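+       /* the PLANE_OFFSET register packs the y offset in its high 16 bits
+        * and the x offset in its low 16 bits */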
+       plane_offset = y_offset << 16 | x_offset;
  
        I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
        I915_WRITE(PLANE_POS(pipe, 0), 0);
-       I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
-       I915_WRITE(PLANE_SIZE(pipe, 0),
-                  (intel_crtc->config->pipe_src_h - 1) << 16 |
-                  (intel_crtc->config->pipe_src_w - 1));
-       I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
+       I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
+       I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
+       I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
        I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
  
        POSTING_READ(PLANE_SURF(pipe, 0));
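Not part of the patch: a minimal standalone restatement of the 90/270 degree remap programmed above, assuming src_h is the plane's source height in pixels (the helper name is made up for illustration).

	/* for rotated scanout the stride becomes the surface height in tiles,
	 * and the x/y offsets trade places */
	static inline void skl_rotated_offsets(int x, int y, int src_h,
					       unsigned int stride_tiles,
					       unsigned int tile_height,
					       int *x_offset, int *y_offset)
	{
		*x_offset = stride_tiles * tile_height - y - src_h;
		*y_offset = x;
	}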
@@@ -4126,6 -4209,26 +4209,26 @@@ struct intel_shared_dpll *intel_get_sha
                goto found;
        }
  
+       if (IS_BROXTON(dev_priv->dev)) {
+               /* PLL is attached to port in bxt */
+               struct intel_encoder *encoder;
+               struct intel_digital_port *intel_dig_port;
+               encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
+               if (WARN_ON(!encoder))
+                       return NULL;
+               intel_dig_port = enc_to_dig_port(&encoder->base);
+               /* 1:1 mapping between ports and PLLs */
+               i = (enum intel_dpll_id)intel_dig_port->port;
+               pll = &dev_priv->shared_dplls[i];
+               DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+                       crtc->base.base.id, pll->name);
+               WARN_ON(pll->new_config->crtc_mask);
+               goto found;
+       }
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                pll = &dev_priv->shared_dplls[i];
  
@@@ -4251,16 -4354,175 +4354,175 @@@ static void cpt_verify_modeset(struct d
        }
  }
  
- static void skylake_pfit_enable(struct intel_crtc *crtc)
+ /**
+  * skl_update_scaler_users - Stages update to crtc's scaler state
+  * @intel_crtc: crtc
+  * @crtc_state: crtc state
+  * @intel_plane: plane (NULL indicates that the crtc is requesting the update)
+  * @plane_state: plane's state
+  * @force_detach: request unconditional detachment of the scaler
+  *
+  * This function updates the scaler state for the requested plane or crtc.
+  * To request a scaler usage update for a plane, the caller shall pass the
+  * plane pointer; to request one for the crtc, the caller shall pass NULL.
+  *
+  * Return:
+  *     0 - scaler_usage updated successfully
+  *     error - requested scaling cannot be supported or other error condition
+  */
+ int
+ skl_update_scaler_users(
+       struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state,
+       struct intel_plane *intel_plane, struct intel_plane_state *plane_state,
+       int force_detach)
+ {
+       int need_scaling;
+       int idx;
+       int src_w, src_h, dst_w, dst_h;
+       int *scaler_id;
+       struct drm_framebuffer *fb;
+       struct intel_crtc_scaler_state *scaler_state;
+
+       if (!intel_crtc || !crtc_state)
+               return 0;
+       scaler_state = &crtc_state->scaler_state;
+       idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX;
+       fb = intel_plane ? plane_state->base.fb : NULL;
+       if (intel_plane) {
+               src_w = drm_rect_width(&plane_state->src) >> 16;
+               src_h = drm_rect_height(&plane_state->src) >> 16;
+               dst_w = drm_rect_width(&plane_state->dst);
+               dst_h = drm_rect_height(&plane_state->dst);
+               scaler_id = &plane_state->scaler_id;
+       } else {
+               struct drm_display_mode *adjusted_mode =
+                       &crtc_state->base.adjusted_mode;
+               src_w = crtc_state->pipe_src_w;
+               src_h = crtc_state->pipe_src_h;
+               dst_w = adjusted_mode->hdisplay;
+               dst_h = adjusted_mode->vdisplay;
+               scaler_id = &scaler_state->scaler_id;
+       }
+       need_scaling = (src_w != dst_w || src_h != dst_h);
+
+       /*
+        * If the plane is being disabled, the scaler is no longer required, or
+        * detachment is being forced:
+        *  - free the scaler bound to this plane/crtc
+        *  - to do this, update crtc_state->scaler_state
+        *
+        * The scaler state in crtc_state is freed here so that the scaler can
+        * be assigned to another user. The actual register update that frees
+        * the scaler is done in the plane/panel-fit programming; for that
+        * reason crtc/plane_state->scaler_id isn't reset here.
+        */
+       if (force_detach || !need_scaling || (intel_plane &&
+               (!fb || !plane_state->visible))) {
+               if (*scaler_id >= 0) {
+                       scaler_state->scaler_users &= ~(1 << idx);
+                       scaler_state->scalers[*scaler_id].in_use = 0;
+                       DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d "
+                               "crtc_state = %p scaler_users = 0x%x\n",
+                               intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC",
+                               intel_plane ? intel_plane->base.base.id :
+                               intel_crtc->base.base.id, crtc_state,
+                               scaler_state->scaler_users);
+                       *scaler_id = -1;
+               }
+               return 0;
+       }
+
+       /* range checks */
+       if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
+               dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+               src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+               dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
+               DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u "
+                       "size is out of scaler range\n",
+                       intel_plane ? "PLANE" : "CRTC",
+                       intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
+                       intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h);
+               return -EINVAL;
+       }
+
+       /* check colorkey */
+       if (intel_plane && intel_plane->ckey.flags != I915_SET_COLORKEY_NONE) {
+               DRM_DEBUG_KMS("PLANE:%d scaling with color key not allowed",
+                       intel_plane->base.base.id);
+               return -EINVAL;
+       }
+
+       /* Check src format */
+       if (intel_plane) {
+               switch (fb->pixel_format) {
+               case DRM_FORMAT_RGB565:
+               case DRM_FORMAT_XBGR8888:
+               case DRM_FORMAT_XRGB8888:
+               case DRM_FORMAT_ABGR8888:
+               case DRM_FORMAT_ARGB8888:
+               case DRM_FORMAT_XRGB2101010:
+               case DRM_FORMAT_ARGB2101010:
+               case DRM_FORMAT_XBGR2101010:
+               case DRM_FORMAT_ABGR2101010:
+               case DRM_FORMAT_YUYV:
+               case DRM_FORMAT_YVYU:
+               case DRM_FORMAT_UYVY:
+               case DRM_FORMAT_VYUY:
+                       break;
+               default:
+                       DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n",
+                               intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+                       return -EINVAL;
+               }
+       }
+
+       /* mark this plane as a scaler user in crtc_state */
+       scaler_state->scaler_users |= (1 << idx);
+       DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u "
+               "crtc_state = %p scaler_users = 0x%x\n",
+               intel_plane ? "PLANE" : "CRTC",
+               intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
+               src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users);
+       return 0;
+ }
+
+ static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = crtc->pipe;
+       struct intel_crtc_scaler_state *scaler_state =
+               &crtc->config->scaler_state;
+       DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
+       /* To update pfit, first stage the scaler state update, then let the
+        * atomic code assign hardware scalers to all staged users, and
+        * finally detach any scalers left unused by the new assignment. */
+       skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable);
+       intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config);
+       skl_detach_scalers(crtc);
+       if (!enable)
+               return;
  
        if (crtc->config->pch_pfit.enabled) {
-               I915_WRITE(PS_CTL(pipe), PS_ENABLE);
-               I915_WRITE(PS_WIN_POS(pipe), crtc->config->pch_pfit.pos);
-               I915_WRITE(PS_WIN_SZ(pipe), crtc->config->pch_pfit.size);
+               int id;
+               if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
+                       DRM_ERROR("Requesting pfit without getting a scaler first\n");
+                       return;
+               }
+               id = scaler_state->scaler_id;
+               I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+                       PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+               I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
+               I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+               DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
        }
  }
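A hedged caller's-eye sketch of the staging API above; the surrounding atomic-check function and its locals are assumptions, not shown in this diff:

	/* stage scaler usage for this plane; the final 0 means no forced detach */
	ret = skl_update_scaler_users(intel_crtc, crtc_state,
				      intel_plane, plane_state, 0);
	if (ret)
		return ret;	/* out-of-range scaling, colorkey, or bad format */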
  
@@@ -4404,7 -4666,7 +4666,7 @@@ static void intel_crtc_load_lut(struct 
        if (!crtc->state->enable || !intel_crtc->active)
                return;
  
-       if (!HAS_PCH_SPLIT(dev_priv->dev)) {
+       if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
                if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
@@@ -4664,10 -4926,12 +4926,12 @@@ static void haswell_crtc_enable(struct 
  
        intel_ddi_enable_pipe_clock(intel_crtc);
  
-       if (IS_SKYLAKE(dev))
-               skylake_pfit_enable(intel_crtc);
-       else
+       if (INTEL_INFO(dev)->gen == 9)
+               skylake_pfit_update(intel_crtc, 1);
+       else if (INTEL_INFO(dev)->gen < 9)
                ironlake_pfit_enable(intel_crtc);
+       else
+               MISSING_CASE(INTEL_INFO(dev)->gen);
  
        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
        intel_crtc_enable_planes(crtc);
  }
  
- static void skylake_pfit_disable(struct intel_crtc *crtc)
- {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe = crtc->pipe;
-       /* To avoid upsetting the power well on haswell only disable the pfit if
-        * it's in use. The hw state code will make sure we get this right. */
-       if (crtc->config->pch_pfit.enabled) {
-               I915_WRITE(PS_CTL(pipe), 0);
-               I915_WRITE(PS_WIN_POS(pipe), 0);
-               I915_WRITE(PS_WIN_SZ(pipe), 0);
-       }
- }
  static void ironlake_pfit_disable(struct intel_crtc *crtc)
  {
        struct drm_device *dev = crtc->base.dev;
@@@ -4827,10 -5076,12 +5076,12 @@@ static void haswell_crtc_disable(struc
  
        intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
  
-       if (IS_SKYLAKE(dev))
-               skylake_pfit_disable(intel_crtc);
-       else
+       if (INTEL_INFO(dev)->gen == 9)
+               skylake_pfit_update(intel_crtc, 0);
+       else if (INTEL_INFO(dev)->gen < 9)
                ironlake_pfit_disable(intel_crtc);
+       else
+               MISSING_CASE(INTEL_INFO(dev)->gen);
  
        intel_ddi_disable_pipe_clock(intel_crtc);
  
@@@ -4994,6 -5245,181 +5245,181 @@@ static void modeset_update_crtc_power_d
        intel_display_set_init_power(dev_priv, false);
  }
  
+ void broxton_set_cdclk(struct drm_device *dev, int frequency)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t divider;
+       uint32_t ratio;
+       uint32_t current_freq;
+       int ret;
+
+       /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
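+       /* Illustrative spot checks (not part of the patch):
+        * 144000 kHz = 19200 * 60 / 2 / 4, 624000 kHz = 19200 * 65 / 2 / 1 */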
+       switch (frequency) {
+       case 144000:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_4;
+               ratio = BXT_DE_PLL_RATIO(60);
+               break;
+       case 288000:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_2;
+               ratio = BXT_DE_PLL_RATIO(60);
+               break;
+       case 384000:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
+               ratio = BXT_DE_PLL_RATIO(60);
+               break;
+       case 576000:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+               ratio = BXT_DE_PLL_RATIO(60);
+               break;
+       case 624000:
+               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+               ratio = BXT_DE_PLL_RATIO(65);
+               break;
+       case 19200:
+               /*
+                * Bypass frequency with DE PLL disabled. Init ratio, divider
+                * to suppress GCC warning.
+                */
+               ratio = 0;
+               divider = 0;
+               break;
+       default:
+               DRM_ERROR("unsupported CDCLK freq %d", frequency);
+               return;
+       }
+       mutex_lock(&dev_priv->rps.hw_lock);
+       /* Inform power controller of upcoming frequency change */
+       ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+                                     0x80000000);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       if (ret) {
+               DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
+                         ret, frequency);
+               return;
+       }
+       current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
+       /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
+       current_freq = current_freq * 500 + 1000;
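+       /* e.g. a raw field value of 1246 decodes to 1246 * 500 + 1000
+        * = 624000 kHz (illustrative) */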
+       /*
+        * DE PLL has to be disabled when
+        * - setting to 19.2MHz (bypass, PLL isn't used)
+        * - before setting to 624MHz (PLL needs toggling)
+        * - before setting to any frequency from 624MHz (PLL needs toggling)
+        */
+       if (frequency == 19200 || frequency == 624000 ||
+           current_freq == 624000) {
+               I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
+               /* Timeout 200us */
+               if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
+                            1))
+                       DRM_ERROR("timout waiting for DE PLL unlock\n");
+       }
+       if (frequency != 19200) {
+               uint32_t val;
+               val = I915_READ(BXT_DE_PLL_CTL);
+               val &= ~BXT_DE_PLL_RATIO_MASK;
+               val |= ratio;
+               I915_WRITE(BXT_DE_PLL_CTL, val);
+               I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+               /* Timeout 200us */
+               if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
+                       DRM_ERROR("timeout waiting for DE PLL lock\n");
+               val = I915_READ(CDCLK_CTL);
+               val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
+               val |= divider;
+               /*
+                * Disable SSA Precharge when CD clock frequency < 500 MHz,
+                * enable otherwise.
+                */
+               val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+               if (frequency >= 500000)
+                       val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+               val &= ~CDCLK_FREQ_DECIMAL_MASK;
+               /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+               val |= (frequency - 1000) / 500;
+               I915_WRITE(CDCLK_CTL, val);
+       }
+       mutex_lock(&dev_priv->rps.hw_lock);
+       ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+                                     DIV_ROUND_UP(frequency, 25000));
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       if (ret) {
+               DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
+                         ret, frequency);
+               return;
+       }
+       dev_priv->cdclk_freq = frequency;
+ }
+
+ void broxton_init_cdclk(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t val;
+       /*
+        * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT,
+        * or else the reset will hang because there is no PCH to respond.
+        * Move the handshake programming to the initialization sequence;
+        * previously it was left up to the BIOS.
+        */
+       val = I915_READ(HSW_NDE_RSTWRN_OPT);
+       val &= ~RESET_PCH_HANDSHAKE_ENABLE;
+       I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+       /* Enable PG1 for cdclk */
+       intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+       /* check if cd clock is enabled */
+       if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
+               DRM_DEBUG_KMS("Display already initialized\n");
+               return;
+       }
+       /*
+        * FIXME:
+        * - The initial CDCLK needs to be read from VBT.
+        *   Make this change once the VBT changes for BXT have landed.
+        * - Check whether setting the max (or any) cdclk freq is really
+        *   necessary here; it belongs in the modeset path.
+        */
+       broxton_set_cdclk(dev, 624000);
+       I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+       udelay(10);
+       if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+               DRM_ERROR("DBuf power enable timeout!\n");
+ }
+
+ void broxton_uninit_cdclk(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+       udelay(10);
+       if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+               DRM_ERROR("DBuf power disable timeout!\n");
+       /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
+       broxton_set_cdclk(dev, 19200);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ }
+
  /* returns HPLL frequency in kHz */
  static int valleyview_get_vco(struct drm_i915_private *dev_priv)
  {
@@@ -5012,16 -5438,16 +5438,16 @@@ static void vlv_update_cdclk(struct drm
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+       dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
        DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
-                        dev_priv->vlv_cdclk_freq);
+                        dev_priv->cdclk_freq);
  
        /*
         * Program the gmbus_freq based on the cdclk frequency.
         * BSpec erroneously claims we should aim for 4MHz, but
         * in fact 1MHz is the correct frequency.
         */
-       I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
+       I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
  }
  
  /* Adjust CDclk dividers to allow high res or save power if possible */
@@@ -5030,7 -5456,8 +5456,8 @@@ static void valleyview_set_cdclk(struc
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, cmd;
  
-       WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
+       WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+                                       != dev_priv->cdclk_freq);
  
        if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
                cmd = 2;
@@@ -5094,7 -5521,8 +5521,8 @@@ static void cherryview_set_cdclk(struc
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, cmd;
  
-       WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
+       WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+                                               != dev_priv->cdclk_freq);
  
        switch (cdclk) {
        case 333333:
@@@ -5159,37 -5587,74 +5587,74 @@@ static int valleyview_calc_cdclk(struc
                return 200000;
  }
  
+ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
+                             int max_pixclk)
+ {
+       /*
+        * FIXME:
+        * - remove the guardband, it's not needed on BXT
+        * - set 19.2MHz bypass frequency if there are no active pipes
+        */
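+       /*
+        * Illustration (not from the patch): max_pixclk = 533000 kHz exceeds
+        * the guard-banded threshold 576000 * 9 / 10 = 518400 kHz, so the
+        * first branch below picks 624000 kHz.
+        */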
+       if (max_pixclk > 576000*9/10)
+               return 624000;
+       else if (max_pixclk > 384000*9/10)
+               return 576000;
+       else if (max_pixclk > 288000*9/10)
+               return 384000;
+       else if (max_pixclk > 144000*9/10)
+               return 288000;
+       else
+               return 144000;
+ }
+
  /* compute the max pixel clock for new configuration */
- static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
+ static int intel_mode_max_pixclk(struct drm_atomic_state *state)
  {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = state->dev;
        struct intel_crtc *intel_crtc;
+       struct intel_crtc_state *crtc_state;
        int max_pixclk = 0;
  
        for_each_intel_crtc(dev, intel_crtc) {
-               if (intel_crtc->new_enabled)
-                       max_pixclk = max(max_pixclk,
-                                        intel_crtc->new_config->base.adjusted_mode.crtc_clock);
+               crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
+               if (!crtc_state->base.enable)
+                       continue;
+               max_pixclk = max(max_pixclk,
+                                crtc_state->base.adjusted_mode.crtc_clock);
        }
  
        return max_pixclk;
  }
  
- static void valleyview_modeset_global_pipes(struct drm_device *dev,
+ static int valleyview_modeset_global_pipes(struct drm_atomic_state *state,
                                            unsigned *prepare_pipes)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct intel_crtc *intel_crtc;
-       int max_pixclk = intel_mode_max_pixclk(dev_priv);
+       int max_pixclk = intel_mode_max_pixclk(state);
+       int cdclk;
  
-       if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
-           dev_priv->vlv_cdclk_freq)
-               return;
+       if (max_pixclk < 0)
+               return max_pixclk;
+       if (IS_VALLEYVIEW(dev_priv))
+               cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+       else
+               cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+       if (cdclk == dev_priv->cdclk_freq)
+               return 0;
  
        /* disable/enable all currently active pipes while we change cdclk */
-       for_each_intel_crtc(dev, intel_crtc)
+       for_each_intel_crtc(state->dev, intel_crtc)
                if (intel_crtc->base.state->enable)
                        *prepare_pipes |= (1 << intel_crtc->pipe);
+       return 0;
  }
  
  static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
        else
                default_credits = PFI_CREDIT(8);
  
-       if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
+       if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
                /* CHV suggested value is 31 or 63 */
                if (IS_CHERRYVIEW(dev_priv))
                        credits = PFI_CREDIT_31;
@@@ -5232,10 -5697,20 +5697,20 @@@ static void valleyview_modeset_global_r
  {
        struct drm_device *dev = state->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int max_pixclk = intel_mode_max_pixclk(dev_priv);
-       int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+       int max_pixclk = intel_mode_max_pixclk(state);
+       int req_cdclk;
+
+       /*
+        * The only reason this can fail is if we fail to add the crtc_state
+        * to the atomic state. But that can't happen, since the call to
+        * intel_mode_max_pixclk() in valleyview_modeset_global_pipes() (which
+        * cannot have failed, or the modeset would have been aborted) has
+        * already added all the states.
+        */
+       if (WARN_ON(max_pixclk < 0))
+               return;
  
-       if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+       req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+       if (req_cdclk != dev_priv->cdclk_freq) {
                /*
                 * FIXME: We can end up here with all power domains off, yet
                 * with a CDCLK frequency other than the minimum. To account
@@@ -5554,7 -6029,7 +6029,7 @@@ static void intel_crtc_disable(struct d
        dev_priv->display.crtc_disable(crtc);
        dev_priv->display.off(crtc);
  
-       crtc->primary->funcs->disable_plane(crtc->primary);
+       drm_plane_helper_disable(crtc->primary);
  
        /* Update computed state. */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@@ -5695,65 -6170,80 +6170,80 @@@ bool intel_connector_get_hw_state(struc
        return encoder->get_hw_state(encoder, &pipe);
  }
  
- static int pipe_required_fdi_lanes(struct drm_device *dev, enum pipe pipe)
+ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
  {
-       struct intel_crtc *crtc =
-               to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
-       if (crtc->base.state->enable &&
-           crtc->config->has_pch_encoder)
-               return crtc->config->fdi_lanes;
+       if (crtc_state->base.enable && crtc_state->has_pch_encoder)
+               return crtc_state->fdi_lanes;
  
        return 0;
  }
  
- static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
                                     struct intel_crtc_state *pipe_config)
  {
+       struct drm_atomic_state *state = pipe_config->base.state;
+       struct intel_crtc *other_crtc;
+       struct intel_crtc_state *other_crtc_state;
        DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
                      pipe_name(pipe), pipe_config->fdi_lanes);
        if (pipe_config->fdi_lanes > 4) {
                DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
                              pipe_name(pipe), pipe_config->fdi_lanes);
-               return false;
+               return -EINVAL;
        }
  
        if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                if (pipe_config->fdi_lanes > 2) {
                        DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
                                      pipe_config->fdi_lanes);
-                       return false;
+                       return -EINVAL;
                } else {
-                       return true;
+                       return 0;
                }
        }
  
        if (INTEL_INFO(dev)->num_pipes == 2)
-               return true;
+               return 0;
  
        /* Ivybridge 3 pipe is really complicated */
        switch (pipe) {
        case PIPE_A:
-               return true;
+               return 0;
        case PIPE_B:
-               if (pipe_config->fdi_lanes > 2 &&
-                   pipe_required_fdi_lanes(dev, PIPE_C) > 0) {
+               if (pipe_config->fdi_lanes <= 2)
+                       return 0;
+               other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
+               other_crtc_state =
+                       intel_atomic_get_crtc_state(state, other_crtc);
+               if (IS_ERR(other_crtc_state))
+                       return PTR_ERR(other_crtc_state);
+               if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
                        DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
                                      pipe_name(pipe), pipe_config->fdi_lanes);
-                       return false;
+                       return -EINVAL;
                }
-               return true;
+               return 0;
        case PIPE_C:
                if (pipe_config->fdi_lanes > 2) {
                        DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
                                      pipe_name(pipe), pipe_config->fdi_lanes);
-                       return false;
+                       return -EINVAL;
                }
-               if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) {
+               other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
+               other_crtc_state =
+                       intel_atomic_get_crtc_state(state, other_crtc);
+               if (IS_ERR(other_crtc_state))
+                       return PTR_ERR(other_crtc_state);
+               if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
                        DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
-                       return false;
+                       return -EINVAL;
                }
-               return true;
+               return 0;
        default:
                BUG();
        }
@@@ -5765,8 -6255,8 +6255,8 @@@ static int ironlake_fdi_compute_config(
  {
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       int lane, link_bw, fdi_dotclock;
-       bool setup_ok, needs_recompute = false;
+       int lane, link_bw, fdi_dotclock, ret;
+       bool needs_recompute = false;
  
  retry:
        /* FDI is a binary signal running at ~2.7GHz, encoding
        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
                               link_bw, &pipe_config->fdi_m_n);
  
-       setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
-                                           intel_crtc->pipe, pipe_config);
-       if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
+       ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
+                                      intel_crtc->pipe, pipe_config);
+       if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
                pipe_config->pipe_bpp -= 2*3;
                DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
                              pipe_config->pipe_bpp);
        if (needs_recompute)
                return RETRY;
  
-       return setup_ok ? 0 : -EINVAL;
+       return ret;
  }
  
  static void hsw_compute_ips_config(struct intel_crtc *crtc,
@@@ -5820,6 -6310,7 +6310,7 @@@ static int intel_crtc_compute_config(st
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+       int ret;
  
        /* FIXME should check pixel clock limits on all platforms */
        if (INTEL_INFO(dev)->gen < 4) {
                adjusted_mode->hsync_start == adjusted_mode->hdisplay)
                return -EINVAL;
  
-       if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
-               pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
-       } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
-               /* only a 8bpc pipe, with 6bpc dither through the panel fitter
-                * for lvds. */
-               pipe_config->pipe_bpp = 8*3;
-       }
        if (HAS_IPS(dev))
                hsw_compute_ips_config(crtc, pipe_config);
  
        if (pipe_config->has_pch_encoder)
                return ironlake_fdi_compute_config(crtc, pipe_config);
  
-       return 0;
+       /* FIXME: remove the call below once atomic modeset is in place and
+        * all crtc related checks are called from the atomic_crtc_check
+        * function */
+       DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n",
+               crtc, pipe_config->base.state);
+       ret = intel_atomic_setup_scalers(dev, crtc, pipe_config);
+       return ret;
+ }
+
+ static int skylake_get_display_clock_speed(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
+       uint32_t cdctl = I915_READ(CDCLK_CTL);
+       uint32_t linkrate;
+       if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
+               WARN(1, "LCPLL1 not enabled\n");
+               return 24000; /* 24MHz is the cd freq with NSSC ref */
+       }
+       if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
+               return 540000;
+       linkrate = (I915_READ(DPLL_CTRL1) &
+                   DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+       if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
+           linkrate == DPLL_CRTL1_LINK_RATE_1080) {
+               /* vco 8640 */
+               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+               case CDCLK_FREQ_450_432:
+                       return 432000;
+               case CDCLK_FREQ_337_308:
+                       return 308570;
+               case CDCLK_FREQ_675_617:
+                       return 617140;
+               default:
+                       WARN(1, "Unknown cd freq selection\n");
+               }
+       } else {
+               /* vco 8100 */
+               switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+               case CDCLK_FREQ_450_432:
+                       return 450000;
+               case CDCLK_FREQ_337_308:
+                       return 337500;
+               case CDCLK_FREQ_675_617:
+                       return 675000;
+               default:
+                       WARN(1, "Unknown cd freq selection\n");
+               }
+       }
+       /* error case: behave as if DPLL0 isn't enabled */
+       return 24000;
+ }
+
+ static int broadwell_get_display_clock_speed(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t lcpll = I915_READ(LCPLL_CTL);
+       uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+       if (lcpll & LCPLL_CD_SOURCE_FCLK)
+               return 800000;
+       else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+               return 450000;
+       else if (freq == LCPLL_CLK_FREQ_450)
+               return 450000;
+       else if (freq == LCPLL_CLK_FREQ_54O_BDW)
+               return 540000;
+       else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+               return 337500;
+       else
+               return 675000;
+ }
+
+ static int haswell_get_display_clock_speed(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t lcpll = I915_READ(LCPLL_CTL);
+       uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+       if (lcpll & LCPLL_CD_SOURCE_FCLK)
+               return 800000;
+       else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+               return 450000;
+       else if (freq == LCPLL_CLK_FREQ_450)
+               return 450000;
+       else if (IS_HSW_ULT(dev))
+               return 337500;
+       else
+               return 540000;
  }
  
  static int valleyview_get_display_clock_speed(struct drm_device *dev)
        return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
  }
  
+ static int ilk_get_display_clock_speed(struct drm_device *dev)
+ {
+       return 450000;
+ }
+
  static int i945_get_display_clock_speed(struct drm_device *dev)
  {
        return 400000;
  
  static int i915_get_display_clock_speed(struct drm_device *dev)
  {
-       return 333000;
+       return 333333;
  }
  
  static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
@@@ -5922,19 -6504,19 +6504,19 @@@ static int pnv_get_display_clock_speed(
  
        switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
        case GC_DISPLAY_CLOCK_267_MHZ_PNV:
-               return 267000;
+               return 266667;
        case GC_DISPLAY_CLOCK_333_MHZ_PNV:
-               return 333000;
+               return 333333;
        case GC_DISPLAY_CLOCK_444_MHZ_PNV:
-               return 444000;
+               return 444444;
        case GC_DISPLAY_CLOCK_200_MHZ_PNV:
                return 200000;
        default:
                DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
        case GC_DISPLAY_CLOCK_133_MHZ_PNV:
-               return 133000;
+               return 133333;
        case GC_DISPLAY_CLOCK_167_MHZ_PNV:
-               return 167000;
+               return 166667;
        }
  }
  
@@@ -5945,11 -6527,11 +6527,11 @@@ static int i915gm_get_display_clock_spe
        pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
  
        if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
-               return 133000;
+               return 133333;
        else {
                switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
                case GC_DISPLAY_CLOCK_333_MHZ:
-                       return 333000;
+                       return 333333;
                default:
                case GC_DISPLAY_CLOCK_190_200_MHZ:
                        return 190000;
  
  static int i865_get_display_clock_speed(struct drm_device *dev)
  {
-       return 266000;
+       return 266667;
  }
  
  static int i855_get_display_clock_speed(struct drm_device *dev)
        case GC_CLOCK_166_250:
                return 250000;
        case GC_CLOCK_100_133:
-               return 133000;
+               return 133333;
        }
  
        /* Shouldn't happen */
  
  static int i830_get_display_clock_speed(struct drm_device *dev)
  {
-       return 133000;
+       return 133333;
  }
  
  static void
@@@ -6037,7 -6619,7 +6619,7 @@@ static int i9xx_get_refclk(const struc
  
        WARN_ON(!crtc_state->base.state);
  
-       if (IS_VALLEYVIEW(dev)) {
+       if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
                refclk = 100000;
        } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
@@@ -7980,14 -8562,28 +8562,28 @@@ static void skylake_get_pfit_config(str
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t tmp;
+       struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
+       uint32_t ps_ctrl = 0;
+       int id = -1;
+       int i;
  
-       tmp = I915_READ(PS_CTL(crtc->pipe));
+       /* find scaler attached to this pipe */
+       for (i = 0; i < crtc->num_scalers; i++) {
+               ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
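+               /* a clear plane-select field means the scaler is bound to
+                * the pipe (panel fitting) rather than to a plane */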
+               if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
+                       id = i;
+                       pipe_config->pch_pfit.enabled = true;
+                       pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
+                       pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+                       break;
+               }
+       }
  
-       if (tmp & PS_ENABLE) {
-               pipe_config->pch_pfit.enabled = true;
-               pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
-               pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
+       scaler_state->scaler_id = id;
+       if (id >= 0) {
+               scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
+       } else {
+               scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
        }
  }
  
@@@ -8472,6 -9068,23 +9068,23 @@@ void hsw_disable_pc8(struct drm_i915_pr
        intel_prepare_ddi(dev);
  }
  
+ static void broxton_modeset_global_resources(struct drm_atomic_state *state)
+ {
+       struct drm_device *dev = state->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int max_pixclk = intel_mode_max_pixclk(state);
+       int req_cdclk;
+
+       /* see the comment in valleyview_modeset_global_resources */
+       if (WARN_ON(max_pixclk < 0))
+               return;
+
+       req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+       if (req_cdclk != dev_priv->cdclk_freq)
+               broxton_set_cdclk(dev, req_cdclk);
+ }
+
  static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
                                      struct intel_crtc_state *crtc_state)
  {
        return 0;
  }
  
+ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
+                               enum port port,
+                               struct intel_crtc_state *pipe_config)
+ {
+       switch (port) {
+       case PORT_A:
+               pipe_config->ddi_pll_sel = SKL_DPLL0;
+               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+               break;
+       case PORT_B:
+               pipe_config->ddi_pll_sel = SKL_DPLL1;
+               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+               break;
+       case PORT_C:
+               pipe_config->ddi_pll_sel = SKL_DPLL2;
+               pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+               break;
+       default:
+               DRM_ERROR("Incorrect port type\n");
+       }
+ }
+
  static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
@@@ -8545,6 -9180,8 +9180,8 @@@ static void haswell_get_ddi_port_state(
  
        if (IS_SKYLAKE(dev))
                skylake_get_ddi_pll(dev_priv, port, pipe_config);
+       else if (IS_BROXTON(dev))
+               bxt_get_ddi_pll(dev_priv, port, pipe_config);
        else
                haswell_get_ddi_pll(dev_priv, port, pipe_config);
  
@@@ -8621,12 -9258,22 +9258,22 @@@ static bool haswell_get_pipe_config(str
  
        intel_get_pipe_timings(crtc, pipe_config);
  
+       if (INTEL_INFO(dev)->gen >= 9)
+               skl_init_scalers(dev, crtc, pipe_config);
+
        pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
-               if (IS_SKYLAKE(dev))
+               if (INTEL_INFO(dev)->gen == 9)
                        skylake_get_pfit_config(crtc, pipe_config);
-               else
+               else if (INTEL_INFO(dev)->gen < 9)
                        ironlake_get_pfit_config(crtc, pipe_config);
+               else
+                       MISSING_CASE(INTEL_INFO(dev)->gen);
+       } else {
+               pipe_config->scaler_state.scaler_id = -1;
+               pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
        }
  
        if (IS_HASWELL(dev))
@@@ -9070,7 -9717,6 +9717,6 @@@ retry
  
        intel_crtc = to_intel_crtc(crtc);
        intel_crtc->new_enabled = true;
-       intel_crtc->new_config = intel_crtc->config;
        old->dpms_mode = connector->dpms;
        old->load_detect_temp = true;
        old->release_fb = NULL;
  
   fail:
        intel_crtc->new_enabled = crtc->state->enable;
-       if (intel_crtc->new_enabled)
-               intel_crtc->new_config = intel_crtc->config;
-       else
-               intel_crtc->new_config = NULL;
  fail_unlock:
        if (state) {
                drm_atomic_state_free(state);
@@@ -9175,7 -9817,6 +9817,6 @@@ void intel_release_load_detect_pipe(str
                to_intel_connector(connector)->new_encoder = NULL;
                intel_encoder->new_crtc = NULL;
                intel_crtc->new_enabled = false;
-               intel_crtc->new_config = NULL;
  
                connector_state->best_encoder = NULL;
                connector_state->crtc = NULL;
@@@ -9915,23 -10556,34 +10556,34 @@@ static void skl_do_mmio_flip(struct int
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
        const enum pipe pipe = intel_crtc->pipe;
        u32 ctl, stride;
  
        ctl = I915_READ(PLANE_CTL(pipe, 0));
        ctl &= ~PLANE_CTL_TILED_MASK;
-       if (obj->tiling_mode == I915_TILING_X)
+       switch (fb->modifier[0]) {
+       case DRM_FORMAT_MOD_NONE:
+               break;
+       case I915_FORMAT_MOD_X_TILED:
                ctl |= PLANE_CTL_TILED_X;
+               break;
+       case I915_FORMAT_MOD_Y_TILED:
+               ctl |= PLANE_CTL_TILED_Y;
+               break;
+       case I915_FORMAT_MOD_Yf_TILED:
+               ctl |= PLANE_CTL_TILED_YF;
+               break;
+       default:
+               MISSING_CASE(fb->modifier[0]);
+       }
  
        /*
         * The stride is either expressed as a multiple of 64-byte chunks for
         * linear buffers or in number of tiles for tiled buffers.
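         * (For illustration: an X-tiled fb with pitches[0] == 8192 bytes
         * yields a stride of 8192 / 512 = 16 tiles, X tiles being 512 bytes
         * wide.)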
         */
-       stride = fb->pitches[0] >> 6;
-       if (obj->tiling_mode == I915_TILING_X)
-               stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */
+       stride = fb->pitches[0] /
+                intel_fb_stride_alignment(dev, fb->modifier[0],
+                                          fb->pixel_format);
  
        /*
         * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
@@@ -10085,6 -10737,7 +10737,7 @@@ void intel_check_page_flip(struct drm_d
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_unpin_work *work;
  
        WARN_ON(!in_interrupt());
  
                return;
  
        spin_lock(&dev->event_lock);
-       if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
+       work = intel_crtc->unpin_work;
+       if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
                WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
-                        intel_crtc->unpin_work->flip_queued_vblank,
-                        drm_vblank_count(dev, pipe));
+                        work->flip_queued_vblank, drm_vblank_count(dev, pipe));
                page_flip_completed(intel_crtc);
+               work = NULL;
        }
+       if (work != NULL &&
+           drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
+               intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
        spin_unlock(&dev->event_lock);
  }
  
@@@ -10115,6 -10772,7 +10772,7 @@@ static int intel_crtc_page_flip(struct 
        enum pipe pipe = intel_crtc->pipe;
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
+       bool mmio_flip;
        int ret;
  
        /*
                ring = &dev_priv->ring[RCS];
        }
  
+       mmio_flip = use_mmio_flip(ring, obj);
+
+       /*
+        * When using CS flips, we want to emit semaphores between rings.
+        * However, when using mmio flips we will create a task to do the
+        * synchronisation, so all we want here is to pin the framebuffer
+        * into the display plane and skip any waits.
+        */
        ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
-                                        crtc->primary->state, ring);
+                                        crtc->primary->state,
+                                        mmio_flip ? i915_gem_request_get_ring(obj->last_read_req) : ring);
        if (ret)
                goto cleanup_pending;
  
        work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
                                                  + intel_crtc->dspaddr_offset;
  
-       if (use_mmio_flip(ring, obj)) {
+       if (mmio_flip) {
                ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
                                            page_flip_flags);
                if (ret)
@@@ -10315,11 -10981,6 +10981,6 @@@ static void intel_modeset_update_staged
  
        for_each_intel_crtc(dev, crtc) {
                crtc->new_enabled = crtc->base.state->enable;
-               if (crtc->new_enabled)
-                       crtc->new_config = crtc->config;
-               else
-                       crtc->new_config = NULL;
        }
  }
  
@@@ -10399,7 -11060,6 +11060,6 @@@ connected_sink_compute_bpp(struct intel
  
  static int
  compute_baseline_pipe_bpp(struct intel_crtc *crtc,
-                         struct drm_framebuffer *fb,
                          struct intel_crtc_state *pipe_config)
  {
        struct drm_device *dev = crtc->base.dev;
        struct intel_connector *connector;
        int bpp, i;
  
-       switch (fb->pixel_format) {
-       case DRM_FORMAT_C8:
-               bpp = 8*3; /* since we go through a colormap */
-               break;
-       case DRM_FORMAT_XRGB1555:
-       case DRM_FORMAT_ARGB1555:
-               /* checked in intel_framebuffer_init already */
-               if (WARN_ON(INTEL_INFO(dev)->gen > 3))
-                       return -EINVAL;
-       case DRM_FORMAT_RGB565:
-               bpp = 6*3; /* min is 18bpp */
-               break;
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_ABGR8888:
-               /* checked in intel_framebuffer_init already */
-               if (WARN_ON(INTEL_INFO(dev)->gen < 4))
-                       return -EINVAL;
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_ARGB8888:
-               bpp = 8*3;
-               break;
-       case DRM_FORMAT_XRGB2101010:
-       case DRM_FORMAT_ARGB2101010:
-       case DRM_FORMAT_XBGR2101010:
-       case DRM_FORMAT_ABGR2101010:
-               /* checked in intel_framebuffer_init already */
-               if (WARN_ON(INTEL_INFO(dev)->gen < 4))
-                       return -EINVAL;
+       if (IS_G4X(dev) || IS_VALLEYVIEW(dev))
                bpp = 10*3;
-               break;
-       /* TODO: gen4+ supports 16 bpc floating point, too. */
-       default:
-               DRM_DEBUG_KMS("unsupported depth\n");
-               return -EINVAL;
-       }
+       else if (INTEL_INFO(dev)->gen >= 5)
+               bpp = 12*3;
+       else
+               bpp = 8*3;
  
        pipe_config->pipe_bpp = bpp;
  
@@@ -10477,8 -11109,14 +11109,14 @@@ static void intel_dump_pipe_config(stru
                                   struct intel_crtc_state *pipe_config,
                                   const char *context)
  {
-       DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
-                     context, pipe_name(crtc->pipe));
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_plane *plane;
+       struct intel_plane *intel_plane;
+       struct intel_plane_state *state;
+       struct drm_framebuffer *fb;
+       DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
+                     context, pipe_config, pipe_name(crtc->pipe));
  
        DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
        DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
        DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
        DRM_DEBUG_KMS("pipe src size: %dx%d\n",
                      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+       DRM_DEBUG_KMS("num_scalers: %d\n", crtc->num_scalers);
+       DRM_DEBUG_KMS("scaler_users: 0x%x\n", pipe_config->scaler_state.scaler_users);
+       DRM_DEBUG_KMS("scaler id: %d\n", pipe_config->scaler_state.scaler_id);
        DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
                      pipe_config->gmch_pfit.control,
                      pipe_config->gmch_pfit.pgm_ratios,
                      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
        DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
        DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
+       DRM_DEBUG_KMS("planes on this crtc\n");
+       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+               intel_plane = to_intel_plane(plane);
+               if (intel_plane->pipe != crtc->pipe)
+                       continue;
+               state = to_intel_plane_state(plane->state);
+               fb = state->base.fb;
+               if (!fb) {
+                       DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
+                               "disabled, scaler_id = %d\n",
+                               plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
+                               plane->base.id, intel_plane->pipe,
+                               (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
+                               drm_plane_index(plane), state->scaler_id);
+                       continue;
+               }
+               DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
+                       plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
+                       plane->base.id, intel_plane->pipe,
+                       crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
+                       drm_plane_index(plane));
+               DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
+                       fb->base.id, fb->width, fb->height, fb->pixel_format);
+               DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
+                       state->scaler_id,
+                       state->src.x1 >> 16, state->src.y1 >> 16,
+                       drm_rect_width(&state->src) >> 16,
+                       drm_rect_height(&state->src) >> 16,
+                       state->dst.x1, state->dst.y1,
+                       drm_rect_width(&state->dst), drm_rect_height(&state->dst));
+       }
  }
  
  static bool encoders_cloneable(const struct intel_encoder *a,
                          b->cloneable & (1 << a->type));
  }
  
- static bool check_single_encoder_cloning(struct intel_crtc *crtc,
+ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+                                        struct intel_crtc *crtc,
                                         struct intel_encoder *encoder)
  {
-       struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *source_encoder;
+       struct drm_connector_state *connector_state;
+       int i;
  
-       for_each_intel_encoder(dev, source_encoder) {
-               if (source_encoder->new_crtc != crtc)
+       for (i = 0; i < state->num_connector; i++) {
+               if (!state->connectors[i])
+                       continue;
+               connector_state = state->connector_states[i];
+               if (connector_state->crtc != &crtc->base)
                        continue;
  
+               source_encoder =
+                       to_intel_encoder(connector_state->best_encoder);
                if (!encoders_cloneable(encoder, source_encoder))
                        return false;
        }
        return true;
  }
  
- static bool check_encoder_cloning(struct intel_crtc *crtc)
+ static bool check_encoder_cloning(struct drm_atomic_state *state,
+                                 struct intel_crtc *crtc)
  {
-       struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;
+       struct drm_connector_state *connector_state;
+       int i;
  
-       for_each_intel_encoder(dev, encoder) {
-               if (encoder->new_crtc != crtc)
+       for (i = 0; i < state->num_connector; i++) {
+               if (!state->connectors[i])
                        continue;
  
-               if (!check_single_encoder_cloning(crtc, encoder))
+               connector_state = state->connector_states[i];
+               if (connector_state->crtc != &crtc->base)
+                       continue;
+               encoder = to_intel_encoder(connector_state->best_encoder);
+               if (!check_single_encoder_cloning(state, crtc, encoder))
                        return false;
        }
  
        return true;
  }
  
- static bool check_digital_port_conflicts(struct drm_device *dev)
+ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
  {
-       struct intel_connector *connector;
+       struct drm_device *dev = state->dev;
+       struct intel_encoder *encoder;
+       struct drm_connector_state *connector_state;
        unsigned int used_ports = 0;
+       int i;
  
        /*
         * Walk the connector list instead of the encoder
         * list to detect the problem on ddi platforms
         * where there's just one encoder per digital port.
         */
-       for_each_intel_connector(dev, connector) {
-               struct intel_encoder *encoder = connector->new_encoder;
+       for (i = 0; i < state->num_connector; i++) {
+               if (!state->connectors[i])
+                       continue;
  
-               if (!encoder)
+               connector_state = state->connector_states[i];
+               if (!connector_state->best_encoder)
                        continue;
  
-               WARN_ON(!encoder->new_crtc);
+               encoder = to_intel_encoder(connector_state->best_encoder);
+               WARN_ON(!connector_state->crtc);
  
                switch (encoder->type) {
                        unsigned int port_mask;
@@@ -10613,34 -11310,35 +11310,35 @@@ static voi
  clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
  {
        struct drm_crtc_state tmp_state;
+       struct intel_crtc_scaler_state scaler_state;
  
-       /* Clear only the intel specific part of the crtc state */
+       /* Clear only the intel specific part of the crtc state excluding scalers */
        tmp_state = crtc_state->base;
+       scaler_state = crtc_state->scaler_state;
        memset(crtc_state, 0, sizeof *crtc_state);
        crtc_state->base = tmp_state;
+       crtc_state->scaler_state = scaler_state;
  }
  
  static struct intel_crtc_state *
  intel_modeset_pipe_config(struct drm_crtc *crtc,
-                         struct drm_framebuffer *fb,
                          struct drm_display_mode *mode,
                          struct drm_atomic_state *state)
  {
-       struct drm_device *dev = crtc->dev;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *pipe_config;
-       int plane_bpp, ret = -EINVAL;
+       int base_bpp, ret = -EINVAL;
        int i;
        bool retry = true;
  
-       if (!check_encoder_cloning(to_intel_crtc(crtc))) {
+       if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
                DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
                return ERR_PTR(-EINVAL);
        }
  
-       if (!check_digital_port_conflicts(dev)) {
+       if (!check_digital_port_conflicts(state)) {
                DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
                return ERR_PTR(-EINVAL);
        }
         * plane pixel format and any sink constraints into account. Returns the
         * source plane bpp so that dithering can be selected on mismatches
         * after encoders and crtc also have had their say. */
-       plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
-                                             fb, pipe_config);
-       if (plane_bpp < 0)
+       base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+                                            pipe_config);
+       if (base_bpp < 0)
                goto fail;
  
        /*
@@@ -10746,9 -11444,9 +11444,9 @@@ encoder_retry
                goto encoder_retry;
        }
  
-       pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
+       pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
        DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
-                     plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+                     base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
  
        return pipe_config;
  fail:
@@@ -10880,9 -11578,6 +11578,6 @@@ intel_modeset_update_state(struct drm_d
        /* Double check state. */
        for_each_intel_crtc(dev, intel_crtc) {
                WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
-               WARN_ON(intel_crtc->new_config &&
-                       intel_crtc->new_config != intel_crtc->config);
-               WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
        }
  
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@@ -11087,6 -11782,8 +11782,8 @@@ intel_pipe_config_compare(struct drm_de
                PIPE_CONF_CHECK_I(pch_pfit.size);
        }
  
+       PIPE_CONF_CHECK_I(scaler_state.scaler_id);
        /* BDW+ don't expose a synchronous way to read the state */
        if (IS_HASWELL(dev))
                PIPE_CONF_CHECK_I(ips_enabled);
@@@ -11429,7 -12126,6 +12126,6 @@@ static void update_scanline_offset(stru
  static struct intel_crtc_state *
  intel_modeset_compute_config(struct drm_crtc *crtc,
                             struct drm_display_mode *mode,
-                            struct drm_framebuffer *fb,
                             struct drm_atomic_state *state,
                             unsigned *modeset_pipes,
                             unsigned *prepare_pipes,
                if (WARN_ON(&intel_crtc->base != crtc))
                        continue;
  
-               pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state);
+               pipe_config = intel_modeset_pipe_config(crtc, mode, state);
                if (IS_ERR(pipe_config))
                        return pipe_config;
  
+               pipe_config->base.enable = true;
                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
                                       "[modeset]");
        }
        return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
  }
  
- static int __intel_set_mode_setup_plls(struct drm_device *dev,
+ static int __intel_set_mode_setup_plls(struct drm_atomic_state *state,
                                       unsigned modeset_pipes,
                                       unsigned disable_pipes)
  {
+       struct drm_device *dev = state->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned clear_pipes = modeset_pipes | disable_pipes;
        struct intel_crtc *intel_crtc;
                goto done;
  
        for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
-               struct intel_crtc_state *state = intel_crtc->new_config;
+               struct intel_crtc_state *crtc_state =
+                       intel_atomic_get_crtc_state(state, intel_crtc);
+               /* Modeset pipes should have a new state by now */
+               if (WARN_ON(IS_ERR(crtc_state)))
+                       continue;
                ret = dev_priv->display.crtc_compute_clock(intel_crtc,
-                                                          state);
+                                                          crtc_state);
                if (ret) {
                        intel_shared_dpll_abort_config(dev_priv);
                        goto done;
@@@ -11519,6 -12224,7 +12224,7 @@@ static int __intel_set_mode(struct drm_
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *saved_mode;
+       struct drm_atomic_state *state = pipe_config->base.state;
        struct intel_crtc_state *crtc_state_copy = NULL;
        struct intel_crtc *intel_crtc;
        int ret = 0;
  
        *saved_mode = crtc->mode;
  
-       if (modeset_pipes)
-               to_intel_crtc(crtc)->new_config = pipe_config;
        /*
         * See if the config requires any additional preparation, e.g.
         * to adjust global state with pipes off.  We need to do this
         * mode set on this crtc.  For other crtcs we need to use the
         * adjusted_mode bits in the crtc directly.
         */
-       if (IS_VALLEYVIEW(dev)) {
-               valleyview_modeset_global_pipes(dev, &prepare_pipes);
+       if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
+               ret = valleyview_modeset_global_pipes(state, &prepare_pipes);
+               if (ret)
+                       goto done;
  
                /* may have added more to prepare_pipes than we should */
                prepare_pipes &= ~disable_pipes;
        }
  
-       ret = __intel_set_mode_setup_plls(dev, modeset_pipes, disable_pipes);
+       ret = __intel_set_mode_setup_plls(state, modeset_pipes, disable_pipes);
        if (ret)
                goto done;
  
         * update the output configuration. */
        intel_modeset_update_state(dev, prepare_pipes);
  
-       modeset_update_crtc_power_domains(pipe_config->base.state);
+       modeset_update_crtc_power_domains(state);
  
-       /* Set up the DPLL and any encoders state that needs to adjust or depend
-        * on the DPLL.
-        */
        for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
                struct drm_plane *primary = intel_crtc->base.primary;
                int vdisplay, hdisplay;
  
                drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
-               ret = primary->funcs->update_plane(primary, &intel_crtc->base,
-                                                  fb, 0, 0,
-                                                  hdisplay, vdisplay,
-                                                  x << 16, y << 16,
-                                                  hdisplay << 16, vdisplay << 16);
+               ret = drm_plane_helper_update(primary, &intel_crtc->base,
+                                             fb, 0, 0,
+                                             hdisplay, vdisplay,
+                                             x << 16, y << 16,
+                                             hdisplay << 16, vdisplay << 16);
        }
  
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@@ -11628,9 -12330,6 +12330,6 @@@ done
                       sizeof *crtc_state_copy);
                intel_crtc->config = crtc_state_copy;
                intel_crtc->base.state = &crtc_state_copy->base;
-               if (modeset_pipes)
-                       intel_crtc->new_config = intel_crtc->config;
        } else {
                kfree(crtc_state_copy);
        }
@@@ -11667,7 -12366,7 +12366,7 @@@ static int intel_set_mode(struct drm_cr
        unsigned modeset_pipes, prepare_pipes, disable_pipes;
        int ret = 0;
  
-       pipe_config = intel_modeset_compute_config(crtc, mode, fb, state,
+       pipe_config = intel_modeset_compute_config(crtc, mode, state,
                                                   &modeset_pipes,
                                                   &prepare_pipes,
                                                   &disable_pipes);
@@@ -11809,11 -12508,6 +12508,6 @@@ static void intel_set_config_restore_st
        count = 0;
        for_each_intel_crtc(dev, crtc) {
                crtc->new_enabled = config->save_crtc_enabled[count++];
-               if (crtc->new_enabled)
-                       crtc->new_config = crtc->config;
-               else
-                       crtc->new_config = NULL;
        }
  
        count = 0;
@@@ -12021,6 -12715,7 +12715,7 @@@ intel_modeset_stage_output_state(struc
                                connector->encoder = connector->new_encoder;
                } else {
                        connector_state->crtc = NULL;
+                       connector_state->best_encoder = NULL;
                }
        }
        for_each_intel_crtc(dev, crtc) {
                                      crtc->new_enabled ? "en" : "dis");
                        config->mode_changed = true;
                }
-               if (crtc->new_enabled)
-                       crtc->new_config = crtc->config;
-               else
-                       crtc->new_config = NULL;
        }
  
        return 0;
@@@ -12070,7 -12760,6 +12760,6 @@@ static void disable_crtc_nofb(struct in
        }
  
        crtc->new_enabled = false;
-       crtc->new_config = NULL;
  }
  
  static int intel_crtc_set_config(struct drm_mode_set *set)
                goto fail;
  
        pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
-                                                  set->fb, state,
+                                                  state,
                                                   &modeset_pipes,
                                                   &prepare_pipes,
                                                   &disable_pipes);
                int vdisplay, hdisplay;
  
                drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
-               ret = primary->funcs->update_plane(primary, set->crtc, set->fb,
-                                                  0, 0, hdisplay, vdisplay,
-                                                  set->x << 16, set->y << 16,
-                                                  hdisplay << 16, vdisplay << 16);
+               ret = drm_plane_helper_update(primary, set->crtc, set->fb,
+                                             0, 0, hdisplay, vdisplay,
+                                             set->x << 16, set->y << 16,
+                                             hdisplay << 16, vdisplay << 16);
  
                /*
                 * We need to make sure the primary plane is re-enabled if it
@@@ -12456,16 -13145,21 +13145,21 @@@ intel_check_primary_plane(struct drm_pl
        struct drm_rect *dest = &state->dst;
        struct drm_rect *src = &state->src;
        const struct drm_rect *clip = &state->clip;
+       bool can_position = false;
        int ret;
  
        crtc = crtc ? crtc : plane->crtc;
        intel_crtc = to_intel_crtc(crtc);
  
+       if (INTEL_INFO(dev)->gen >= 9)
+               can_position = true;
        ret = drm_plane_helper_check_update(plane, crtc, fb,
                                            src, dest, clip,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
-                                           false, true, &state->visible);
+                                           can_position, true,
+                                           &state->visible);
        if (ret)
                return ret;
  
@@@ -12655,8 -13349,8 +13349,8 @@@ void intel_plane_destroy(struct drm_pla
  }
  
  const struct drm_plane_funcs intel_plane_funcs = {
-       .update_plane = drm_plane_helper_update,
-       .disable_plane = drm_plane_helper_disable,
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .set_property = drm_atomic_helper_plane_set_property,
        .atomic_get_property = intel_plane_atomic_get_property,
@@@ -12687,10 -13381,12 +13381,12 @@@ static struct drm_plane *intel_primary_
  
        primary->can_scale = false;
        primary->max_downscale = 1;
+       state->scaler_id = -1;
        primary->pipe = pipe;
        primary->plane = pipe;
        primary->check_plane = intel_check_primary_plane;
        primary->commit_plane = intel_commit_primary_plane;
+       primary->ckey.flags = I915_SET_COLORKEY_NONE;
        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
                primary->plane = !pipe;
  
                                 intel_primary_formats, num_formats,
                                 DRM_PLANE_TYPE_PRIMARY);
  
-       if (INTEL_INFO(dev)->gen >= 4) {
-               if (!dev->mode_config.rotation_property)
-                       dev->mode_config.rotation_property =
-                               drm_mode_create_rotation_property(dev,
-                                                       BIT(DRM_ROTATE_0) |
-                                                       BIT(DRM_ROTATE_180));
-               if (dev->mode_config.rotation_property)
-                       drm_object_attach_property(&primary->base.base,
-                               dev->mode_config.rotation_property,
-                               state->base.rotation);
-       }
+       if (INTEL_INFO(dev)->gen >= 4)
+               intel_create_rotation_property(dev, primary);
  
        drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
  
        return &primary->base;
  }
  
+ void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
+ {
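+       /*
+        * The rotation property is shared by all planes: create it on first
+        * use, adding 90/270 degree rotation on gen9+, then attach it to
+        * this plane.
+        */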
+       if (!dev->mode_config.rotation_property) {
+               unsigned long flags = BIT(DRM_ROTATE_0) |
+                       BIT(DRM_ROTATE_180);
+               if (INTEL_INFO(dev)->gen >= 9)
+                       flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
+               dev->mode_config.rotation_property =
+                       drm_mode_create_rotation_property(dev, flags);
+       }
+       if (dev->mode_config.rotation_property)
+               drm_object_attach_property(&plane->base.base,
+                               dev->mode_config.rotation_property,
+                               plane->base.state->rotation);
+ }
+ 
  static int
  intel_check_cursor_plane(struct drm_plane *plane,
                         struct intel_plane_state *state)
@@@ -12841,6 -13546,7 +13546,7 @@@ static struct drm_plane *intel_cursor_p
        cursor->max_downscale = 1;
        cursor->pipe = pipe;
        cursor->plane = pipe;
+       state->scaler_id = -1;
        cursor->check_plane = intel_check_cursor_plane;
        cursor->commit_plane = intel_commit_cursor_plane;
  
        return &cursor->base;
  }
  
+ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
+       struct intel_crtc_state *crtc_state)
+ {
+       int i;
+       struct intel_scaler *intel_scaler;
+       struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
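+ 
+       /* Mark every scaler unused, with a stable id and the default dynamic mode. */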
+       for (i = 0; i < intel_crtc->num_scalers; i++) {
+               intel_scaler = &scaler_state->scalers[i];
+               intel_scaler->in_use = 0;
+               intel_scaler->id = i;
+               intel_scaler->mode = PS_SCALER_MODE_DYN;
+       }
+       scaler_state->scaler_id = -1;
+ }
+ 
  static void intel_crtc_init(struct drm_device *dev, int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_crtc_set_state(intel_crtc, crtc_state);
        crtc_state->base.crtc = &intel_crtc->base;
  
+       /* initialize shared scalers */
+       if (INTEL_INFO(dev)->gen >= 9) {
+               if (pipe == PIPE_C)
+                       intel_crtc->num_scalers = 1;
+               else
+                       intel_crtc->num_scalers = SKL_NUM_SCALERS;
+               skl_init_scalers(dev, intel_crtc, crtc_state);
+       }
        primary = intel_primary_plane_create(dev, pipe);
        if (!primary)
                goto fail;
@@@ -13038,7 -13772,16 +13772,16 @@@ static void intel_setup_outputs(struct 
        if (intel_crt_present(dev))
                intel_crt_init(dev);
  
-       if (HAS_DDI(dev)) {
+       if (IS_BROXTON(dev)) {
+               /*
+                * FIXME: Broxton doesn't support port detection via the
+                * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
+                * detect the ports.
+                */
+               intel_ddi_init(dev, PORT_A);
+               intel_ddi_init(dev, PORT_B);
+               intel_ddi_init(dev, PORT_C);
+       } else if (HAS_DDI(dev)) {
                int found;
  
                /*
@@@ -13474,10 -14217,23 +14217,23 @@@ static void intel_init_display(struct d
        }
  
        /* Returns the core display clock speed */
-       if (IS_VALLEYVIEW(dev))
+       if (IS_SKYLAKE(dev))
+               dev_priv->display.get_display_clock_speed =
+                       skylake_get_display_clock_speed;
+       else if (IS_BROADWELL(dev))
+               dev_priv->display.get_display_clock_speed =
+                       broadwell_get_display_clock_speed;
+       else if (IS_HASWELL(dev))
+               dev_priv->display.get_display_clock_speed =
+                       haswell_get_display_clock_speed;
+       else if (IS_VALLEYVIEW(dev))
                dev_priv->display.get_display_clock_speed =
                        valleyview_get_display_clock_speed;
-       else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+       else if (IS_GEN5(dev))
+               dev_priv->display.get_display_clock_speed =
+                       ilk_get_display_clock_speed;
+       else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
+                IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
                dev_priv->display.get_display_clock_speed =
                        i945_get_display_clock_speed;
        else if (IS_I915G(dev))
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.modeset_global_resources =
                        valleyview_modeset_global_resources;
+       } else if (IS_BROXTON(dev)) {
+               dev_priv->display.modeset_global_resources =
+                       broxton_modeset_global_resources;
        }
  
        switch (INTEL_INFO(dev)->gen) {
@@@ -14259,7 -15018,6 +15018,7 @@@ void intel_modeset_gem_init(struct drm_
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
 +      int ret;
  
        mutex_lock(&dev->struct_mutex);
        intel_init_gt_powersave(dev);
         * pinned & fenced.  When we do the allocation it's too early
         * for this.
         */
 -      mutex_lock(&dev->struct_mutex);
        for_each_crtc(dev, c) {
                obj = intel_fb_obj(c->primary->fb);
                if (obj == NULL)
                        continue;
  
 -              if (intel_pin_and_fence_fb_obj(c->primary,
 -                                             c->primary->fb,
 -                                             c->primary->state,
 -                                             NULL)) {
 +              mutex_lock(&dev->struct_mutex);
 +              ret = intel_pin_and_fence_fb_obj(c->primary,
 +                                               c->primary->fb,
 +                                               c->primary->state,
 +                                               NULL);
 +              mutex_unlock(&dev->struct_mutex);
 +              if (ret) {
                        DRM_ERROR("failed to pin boot fb on pipe %d\n",
                                  to_intel_crtc(c)->pipe);
                        drm_framebuffer_unreference(c->primary->fb);
                        update_state_fb(c->primary);
                }
        }
 -      mutex_unlock(&dev->struct_mutex);
  
        intel_backlight_register(dev);
  }
@@@ -696,15 -696,13 +696,13 @@@ static uint32_t ilk_get_aux_clock_divid
  {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
  
        if (index)
                return 0;
  
        if (intel_dig_port->port == PORT_A) {
-               if (IS_GEN6(dev) || IS_GEN7(dev))
-                       return 200; /* SNB & IVB eDP input clock at 400Mhz */
-               else
-                       return 225; /* eDP input clock at 450Mhz */
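+               /* The AUX channel wants a 2 MHz clock; derive the divider from cdclk. */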
+               return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
        } else {
                return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
@@@ -719,7 -717,7 +717,7 @@@ static uint32_t hsw_get_aux_clock_divid
        if (intel_dig_port->port == PORT_A) {
                if (index)
                        return 0;
-               return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
+               return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
@@@ -876,9 -874,18 +874,18 @@@ intel_dp_aux_ch(struct intel_dp *intel_
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);
  
-                       if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                                     DP_AUX_CH_CTL_RECEIVE_ERROR))
+                       if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+                               continue;
+                       /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
+                        *   400us delay required for errors and timeouts
+                        *   Timeout errors from the HW already meet this
+                        *   requirement so skip to next iteration
+                        */
+                       if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+                               usleep_range(400, 500);
                                continue;
+                       }
                        if (status & DP_AUX_CH_CTL_DONE)
                                break;
                }
@@@ -1353,6 -1360,14 +1360,14 @@@ intel_dp_compute_config(struct intel_en
        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);
+               if (INTEL_INFO(dev)->gen >= 9) {
+                       int ret;
+                       ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
+                       if (ret)
+                               return ret;
+               }
                if (!HAS_PCH_SPLIT(dev))
                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                                 intel_connector->panel.fitting_mode);
@@@ -1465,6 -1480,8 +1480,8 @@@ found
  
        if (IS_SKYLAKE(dev) && is_edp(intel_dp))
                skl_edp_set_pll_config(pipe_config, common_rates[clock]);
+       else if (IS_BROXTON(dev))
+               /* handled in ddi */;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
        else
@@@ -2742,6 -2759,11 +2759,6 @@@ static void chv_pre_enable_dp(struct in
  
        /* Program Tx lane latency optimal setting */
        for (i = 0; i < 4; i++) {
 -              /* Set the latency optimal bit */
 -              data = (i == 1) ? 0x0 : 0x6;
 -              vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
 -                              data << DPIO_FRC_LATENCY_SHFIT);
 -
                /* Set the upar bit */
                data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
@@@ -2874,7 -2896,9 +2891,9 @@@ intel_dp_voltage_max(struct intel_dp *i
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
  
-       if (INTEL_INFO(dev)->gen >= 9) {
+       if (IS_BROXTON(dev))
+               return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+       else if (INTEL_INFO(dev)->gen >= 9) {
                if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
                        return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@@ -2956,7 -2980,7 +2975,7 @@@ intel_dp_pre_emphasis_max(struct intel_
        }
  }
  
- static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
+ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        return 0;
  }
  
- static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
+ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -3263,7 -3287,7 +3282,7 @@@ intel_get_adjust_train(struct intel_dp 
  }
  
  static uint32_t
- intel_gen4_signal_levels(uint8_t train_set)
+ gen4_signal_levels(uint8_t train_set)
  {
        uint32_t        signal_levels = 0;
  
  
  /* Gen6's DP voltage swing and pre-emphasis control */
  static uint32_t
- intel_gen6_edp_signal_levels(uint8_t train_set)
+ gen6_edp_signal_levels(uint8_t train_set)
  {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
  
  /* Gen7's DP voltage swing and pre-emphasis control */
  static uint32_t
- intel_gen7_edp_signal_levels(uint8_t train_set)
+ gen7_edp_signal_levels(uint8_t train_set)
  {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
  
  /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
  static uint32_t
- intel_hsw_signal_levels(uint8_t train_set)
+ hsw_signal_levels(uint8_t train_set)
  {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
        }
  }
  
+ static void bxt_signal_levels(struct intel_dp *intel_dp)
+ {
+       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+       enum port port = dport->port;
+       struct drm_device *dev = dport->base.base.dev;
+       struct intel_encoder *encoder = &dport->base;
+       uint8_t train_set = intel_dp->train_set[0];
+       uint32_t level = 0;
+       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+                                        DP_TRAIN_PRE_EMPHASIS_MASK);
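+ 
+       /*
+        * BXT programs vswing and pre-emphasis through the DDI buffer
+        * translation tables, so fold the DP training values into a single
+        * table index and run the vswing sequence instead of returning
+        * register bits.
+        */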
+       switch (signal_levels) {
+       default:
+               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               level = 0;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+               level = 1;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+               level = 2;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+               level = 3;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               level = 4;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+               level = 5;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+               level = 6;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               level = 7;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+               level = 8;
+               break;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+               level = 9;
+               break;
+       }
+       bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+ }
+ 
  /* Properly updates "DP" with the correct signal levels. */
  static void
  intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
        uint32_t signal_levels, mask;
        uint8_t train_set = intel_dp->train_set[0];
  
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
-               signal_levels = intel_hsw_signal_levels(train_set);
+       if (IS_BROXTON(dev)) {
+               signal_levels = 0;
+               bxt_signal_levels(intel_dp);
+               mask = 0;
+       } else if (HAS_DDI(dev)) {
+               signal_levels = hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
-               signal_levels = intel_chv_signal_levels(intel_dp);
+               signal_levels = chv_signal_levels(intel_dp);
                mask = 0;
        } else if (IS_VALLEYVIEW(dev)) {
-               signal_levels = intel_vlv_signal_levels(intel_dp);
+               signal_levels = vlv_signal_levels(intel_dp);
                mask = 0;
        } else if (IS_GEN7(dev) && port == PORT_A) {
-               signal_levels = intel_gen7_edp_signal_levels(train_set);
+               signal_levels = gen7_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
        } else if (IS_GEN6(dev) && port == PORT_A) {
-               signal_levels = intel_gen6_edp_signal_levels(train_set);
+               signal_levels = gen6_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
        } else {
-               signal_levels = intel_gen4_signal_levels(train_set);
+               signal_levels = gen4_signal_levels(train_set);
                mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
        }
  
-       DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+       if (mask)
+               DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+       DRM_DEBUG_KMS("Using vswing level %d\n",
+               train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
+       DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
+               (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+                       DP_TRAIN_PRE_EMPHASIS_SHIFT);
  
        *DP = (*DP & ~mask) | signal_levels;
  }
@@@ -3782,6 -3866,21 +3861,21 @@@ intel_dp_get_dpcd(struct intel_dp *inte
                        dev_priv->psr.sink_support = true;
                        DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
                }
+               if (INTEL_INFO(dev)->gen >= 9 &&
+                       (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
+                       uint8_t frame_sync_cap;
+                       dev_priv->psr.sink_support = true;
+                       intel_dp_dpcd_read_wake(&intel_dp->aux,
+                                       DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+                                       &frame_sync_cap, 1);
+                       dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
+                       /* PSR2 needs frame sync as well */
+                       dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+                       DRM_DEBUG_KMS("PSR2 %s on sink\n",
+                               dev_priv->psr.psr2_support ? "supported" : "not supported");
+               }
        }
  
        /* Training Pattern 3 support, both source and sink */
@@@ -3949,11 -4048,78 +4043,78 @@@ intel_dp_get_sink_irq_esi(struct intel_
        return true;
  }
  
- static void
- intel_dp_handle_test_request(struct intel_dp *intel_dp)
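+ /*
+  * DP compliance test handlers; stubs for now that ACK link training
+  * and NAK the other test types until real support lands.
+  */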
+ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
+ {
+       uint8_t test_result = DP_TEST_ACK;
+       return test_result;
+ }
+ 
+ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
+ {
+       uint8_t test_result = DP_TEST_NAK;
+       return test_result;
+ }
+ 
+ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
  {
-       /* NAK by default */
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
+       uint8_t test_result = DP_TEST_NAK;
+       return test_result;
+ }
+ 
+ static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+ {
+       uint8_t test_result = DP_TEST_NAK;
+       return test_result;
+ }
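+ 
+ /*
+  * Read the sink's TEST_REQUEST from the DPCD, run the matching handler
+  * and report the result back through TEST_RESPONSE.
+  */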
+ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
+ {
+       uint8_t response = DP_TEST_NAK;
+       uint8_t rxdata = 0;
+       int status = 0;
+       intel_dp->compliance_test_type = 0;
+       intel_dp->aux.i2c_nack_count = 0;
+       intel_dp->aux.i2c_defer_count = 0;
+       status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
+       if (status <= 0) {
+               DRM_DEBUG_KMS("Could not read test request from sink\n");
+               goto update_status;
+       }
+       switch (rxdata) {
+       case DP_TEST_LINK_TRAINING:
+               DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
+               intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
+               response = intel_dp_autotest_link_training(intel_dp);
+               break;
+       case DP_TEST_LINK_VIDEO_PATTERN:
+               DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
+               intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
+               response = intel_dp_autotest_video_pattern(intel_dp);
+               break;
+       case DP_TEST_LINK_EDID_READ:
+               DRM_DEBUG_KMS("EDID test requested\n");
+               intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
+               response = intel_dp_autotest_edid(intel_dp);
+               break;
+       case DP_TEST_LINK_PHY_TEST_PATTERN:
+               DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+               intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
+               response = intel_dp_autotest_phy_pattern(intel_dp);
+               break;
+       default:
+               DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
+               break;
+       }
+ update_status:
+       status = drm_dp_dpcd_write(&intel_dp->aux,
+                                  DP_TEST_RESPONSE,
+                                  &response, 1);
+       if (status <= 0)
+               DRM_DEBUG_KMS("Could not write test response to sink\n");
  }
  
  static int
@@@ -5152,6 -5318,7 +5313,6 @@@ static void intel_edp_drrs_downclock_wo
                        downclock_mode->vrefresh);
  
  unlock:
 -
        mutex_unlock(&dev_priv->drrs.mutex);
  }
  
@@@ -5173,17 -5340,12 +5334,17 @@@ void intel_edp_drrs_invalidate(struct d
        struct drm_crtc *crtc;
        enum pipe pipe;
  
 -      if (!dev_priv->drrs.dp)
 +      if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;
  
 -      cancel_delayed_work_sync(&dev_priv->drrs.work);
 +      cancel_delayed_work(&dev_priv->drrs.work);
  
        mutex_lock(&dev_priv->drrs.mutex);
 +      if (!dev_priv->drrs.dp) {
 +              mutex_unlock(&dev_priv->drrs.mutex);
 +              return;
 +      }
 +
        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
  
@@@ -5217,17 -5379,12 +5378,17 @@@ void intel_edp_drrs_flush(struct drm_de
        struct drm_crtc *crtc;
        enum pipe pipe;
  
 -      if (!dev_priv->drrs.dp)
 +      if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;
  
 -      cancel_delayed_work_sync(&dev_priv->drrs.work);
 +      cancel_delayed_work(&dev_priv->drrs.work);
  
        mutex_lock(&dev_priv->drrs.mutex);
 +      if (!dev_priv->drrs.dp) {
 +              mutex_unlock(&dev_priv->drrs.mutex);
 +              return;
 +      }
 +
        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
        dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
@@@ -5298,9 -5455,6 +5459,9 @@@ intel_dp_drrs_init(struct intel_connect
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *downclock_mode = NULL;
  
 +      INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
 +      mutex_init(&dev_priv->drrs.mutex);
 +
        if (INTEL_INFO(dev)->gen <= 6) {
                DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
                return NULL;
                return NULL;
        }
  
 -      INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
 -
 -      mutex_init(&dev_priv->drrs.mutex);
 -
        dev_priv->drrs.type = dev_priv->vbt.drrs_type;
  
        dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
@@@ -5574,6 -5732,8 +5735,8 @@@ intel_dp_init_connector(struct intel_di
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
  
+       i915_debugfs_connector_add(connector);
        return true;
  }
  
@@@ -37,6 -37,9 +37,6 @@@
  #include <drm/drm_rect.h>
  #include <drm/drm_atomic.h>
  
 -#define DIV_ROUND_CLOSEST_ULL(ll, d)  \
 -({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
 -
  /**
   * _wait_for - magic (register) wait macro
   *
@@@ -253,6 -256,26 +253,26 @@@ struct intel_plane_state 
         * enable/disable the primary plane
         */
        bool hides_primary;
+       /*
+        * scaler_id
+        *    = -1 : not using a scaler
+        *    >=  0 : using a scaler
+        *
+        * plane requiring a scaler:
+        *   - During check_plane, its bit is set in
+        *     crtc_state->scaler_state.scaler_users by the helper function
+        *     update_scaler_users().
+        *   - scaler_id indicates the scaler it got assigned.
+        *
+        * plane not requiring a scaler:
+        *   - this can happen when scaling is no longer required or the
+        *     plane was simply disabled.
+        *   - During check_plane, the corresponding bit is cleared in
+        *     crtc_state->scaler_state.scaler_users by the helper function
+        *     update_scaler_users().
+        */
+       int scaler_id;
  };
  
  struct intel_initial_plane_config {
        u32 base;
  };
  
+ #define SKL_MIN_SRC_W 8
+ #define SKL_MAX_SRC_W 4096
+ #define SKL_MIN_SRC_H 8
+ #define SKL_MAX_SRC_H 2304
+ #define SKL_MIN_DST_W 8
+ #define SKL_MAX_DST_W 4096
+ #define SKL_MIN_DST_H 8
+ #define SKL_MAX_DST_H 2304
+ 
+ struct intel_scaler {
+       int id;
+       int in_use;
+       uint32_t mode;
+ };
+ 
+ struct intel_crtc_scaler_state {
+ #define SKL_NUM_SCALERS 2
+       struct intel_scaler scalers[SKL_NUM_SCALERS];
+       /*
+        * scaler_users: keeps track of users requesting scalers on this crtc.
+        *
+        *     If a bit is set, that user is using a scaler.
+        *     A user can be a plane or the crtc, as defined below:
+        *       bits 0-30 - plane (bit position is index from drm_plane_index)
+        *       bit 31    - crtc
+        *
+        * Instead of creating a new index to cover planes and crtc, use the
+        * existing drm_plane_index for planes, which stays well below 31, and
+        * reserve bit 31 for the crtc. This is enough to cover all our
+        * platforms.
+        *
+        * intel_atomic_setup_scalers() assigns the available scalers to the
+        * users requesting them, and fails gracefully if the requests exceed
+        * availability.
+        */
+ #define SKL_CRTC_INDEX 31
+       unsigned scaler_users;
+       /* scaler used by crtc for panel fitting purpose */
+       int scaler_id;
+ };
+ 
  struct intel_crtc_state {
        struct drm_crtc_state base;
  
  
        bool dp_encoder_is_mst;
        int pbn;
+       struct intel_crtc_scaler_state scaler_state;
  };
  
  struct intel_pipe_wm {
@@@ -468,7 -536,6 +533,6 @@@ struct intel_crtc 
  
        struct intel_initial_plane_config plane_config;
        struct intel_crtc_state *config;
-       struct intel_crtc_state *new_config;
        bool new_enabled;
  
        /* reset counter value when the last flip was submitted */
        struct intel_mmio_flip mmio_flip;
  
        struct intel_crtc_atomic_commit atomic;
+       /* scalers available on this crtc */
+       int num_scalers;
  };
  
  struct intel_plane_wm_parameters {
@@@ -669,6 -739,9 +736,9 @@@ struct intel_dp 
                                     bool has_aux_irq,
                                     int send_bytes,
                                     uint32_t aux_clock_divider);
+       /* DisplayPort compliance testing */
+       unsigned long compliance_test_type;
  };
  
  struct intel_digital_port {
@@@ -852,7 -925,6 +922,6 @@@ void hsw_fdi_link_train(struct drm_crt
  void intel_ddi_init(struct drm_device *dev, enum port port);
  enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
  bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
- int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
  void intel_ddi_pll_init(struct drm_device *dev);
  void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
  void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
@@@ -867,11 -939,15 +936,15 @@@ bool intel_ddi_connector_get_hw_state(s
  void intel_ddi_fdi_disable(struct drm_crtc *crtc);
  void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config);
+ struct intel_encoder *
+ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
  
  void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
  void intel_ddi_clock_get(struct intel_encoder *encoder,
                         struct intel_crtc_state *pipe_config);
  void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
+ void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+                               enum port port, int type);
  
  /* intel_frontbuffer.c */
  void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
@@@ -997,6 -1073,12 +1070,12 @@@ intel_rotation_90_or_270(unsigned int r
        return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
  }
  
+ unsigned int
+ intel_tile_height(struct drm_device *dev, uint32_t bits_per_pixel,
+                 uint64_t fb_modifier);
+ void intel_create_rotation_property(struct drm_device *dev,
+                                       struct intel_plane *plane);
  bool intel_wm_need_update(struct drm_plane *plane,
                          struct drm_plane_state *state);
  
@@@ -1037,6 -1119,13 +1116,13 @@@ void intel_prepare_reset(struct drm_dev
  void intel_finish_reset(struct drm_device *dev);
  void hsw_enable_pc8(struct drm_i915_private *dev_priv);
  void hsw_disable_pc8(struct drm_i915_private *dev_priv);
+ void broxton_init_cdclk(struct drm_device *dev);
+ void broxton_uninit_cdclk(struct drm_device *dev);
+ void broxton_set_cdclk(struct drm_device *dev, int frequency);
+ void broxton_ddi_phy_init(struct drm_device *dev);
+ void broxton_ddi_phy_uninit(struct drm_device *dev);
+ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+ void bxt_disable_dc9(struct drm_i915_private *dev_priv);
  void intel_dp_get_m_n(struct intel_crtc *crtc,
                      struct intel_crtc_state *pipe_config);
  void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
@@@ -1044,6 -1133,8 +1130,8 @@@ int intel_dotclock_calculate(int link_f
  void
  ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
                                int dotclock);
+ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+                       intel_clock_t *best_clock);
  bool intel_crtc_active(struct drm_crtc *crtc);
  void hsw_enable_ips(struct intel_crtc *crtc);
  void hsw_disable_ips(struct intel_crtc *crtc);
@@@ -1053,6 -1144,10 +1141,10 @@@ void intel_mode_from_pipe_config(struc
                                 struct intel_crtc_state *pipe_config);
  void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
  void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
+ void skl_detach_scalers(struct intel_crtc *intel_crtc);
+ int skl_update_scaler_users(struct intel_crtc *intel_crtc,
+       struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
+       struct intel_plane_state *plane_state, int force_detach);
  
  unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
                                     struct drm_i915_gem_object *obj);
@@@ -1215,6 -1310,7 +1307,7 @@@ void intel_psr_invalidate(struct drm_de
  void intel_psr_flush(struct drm_device *dev,
                         unsigned frontbuffer_bits);
  void intel_psr_init(struct drm_device *dev);
+ void intel_psr_single_frame_update(struct drm_device *dev);
  
  /* intel_runtime_pm.c */
  int intel_power_domains_init(struct drm_i915_private *);
@@@ -1263,7 -1359,10 +1356,10 @@@ void gen6_update_ring_freq(struct drm_d
  void gen6_rps_busy(struct drm_i915_private *dev_priv);
  void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
  void gen6_rps_idle(struct drm_i915_private *dev_priv);
- void gen6_rps_boost(struct drm_i915_private *dev_priv);
+ void gen6_rps_boost(struct drm_i915_private *dev_priv,
+                   struct drm_i915_file_private *file_priv);
+ void intel_queue_rps_boost_for_request(struct drm_device *dev,
+                                      struct drm_i915_gem_request *rq);
  void ilk_wm_get_hw_state(struct drm_device *dev);
  void skl_wm_get_hw_state(struct drm_device *dev);
  void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
@@@ -1314,6 -1413,9 +1410,9 @@@ intel_atomic_get_crtc_state(struct drm_
  
        return to_intel_crtc_state(crtc_state);
  }
+ 
+ int intel_atomic_setup_scalers(struct drm_device *dev,
+       struct intel_crtc *intel_crtc,
+       struct intel_crtc_state *crtc_state);
  
  /* intel_atomic_plane.c */
  struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
@@@ -223,10 -223,14 +223,14 @@@ static bool ibx_infoframe_enabled(struc
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
  
-       return val & VIDEO_DIP_ENABLE;
+       if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
+               return val & VIDEO_DIP_ENABLE;
+       return false;
  }
  
  static void cpt_write_infoframe(struct drm_encoder *encoder,
@@@ -324,10 -328,14 +328,14 @@@ static bool vlv_infoframe_enabled(struc
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
  
-       return val & VIDEO_DIP_ENABLE;
+       if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
+               return val & VIDEO_DIP_ENABLE;
+       return false;
  }
  
  static void hsw_write_infoframe(struct drm_encoder *encoder,
@@@ -1515,6 -1523,11 +1523,6 @@@ static void chv_hdmi_pre_enable(struct 
  
        /* Program Tx latency optimal setting */
        for (i = 0; i < 4; i++) {
 -              /* Set the latency optimal bit */
 -              data = (i == 1) ? 0x0 : 0x6;
 -              vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
 -                              data << DPIO_FRC_LATENCY_SHFIT);
 -
                /* Set the upar bit */
                data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
@@@ -1676,18 -1689,26 +1684,26 @@@ void intel_hdmi_init_connector(struct i
  
        switch (port) {
        case PORT_B:
-               intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+               if (IS_BROXTON(dev_priv))
+                       intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
+               else
+                       intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
                intel_encoder->hpd_pin = HPD_PORT_B;
                break;
        case PORT_C:
-               intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+               if (IS_BROXTON(dev_priv))
+                       intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
+               else
+                       intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
                intel_encoder->hpd_pin = HPD_PORT_C;
                break;
        case PORT_D:
-               if (IS_CHERRYVIEW(dev))
-                       intel_hdmi->ddc_bus = GMBUS_PORT_DPD_CHV;
+               if (WARN_ON(IS_BROXTON(dev_priv)))
+                       intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
+               else if (IS_CHERRYVIEW(dev_priv))
+                       intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
                else
-                       intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+                       intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
                intel_encoder->hpd_pin = HPD_PORT_D;
                break;
        case PORT_A:
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
  
- struct gmbus_port {
+ struct gmbus_pin {
        const char *name;
        int reg;
  };
  
- static const struct gmbus_port gmbus_ports[] = {
-       { "ssc", GPIOB },
-       { "vga", GPIOA },
-       { "panel", GPIOC },
-       { "dpc", GPIOD },
-       { "dpb", GPIOE },
-       { "dpd", GPIOF },
+ /* Map gmbus pin pairs to names and registers. */
+ static const struct gmbus_pin gmbus_pins[] = {
+       [GMBUS_PIN_SSC] = { "ssc", GPIOB },
+       [GMBUS_PIN_VGADDC] = { "vga", GPIOA },
+       [GMBUS_PIN_PANEL] = { "panel", GPIOC },
+       [GMBUS_PIN_DPC] = { "dpc", GPIOD },
+       [GMBUS_PIN_DPB] = { "dpb", GPIOE },
+       [GMBUS_PIN_DPD] = { "dpd", GPIOF },
  };
  
+ static const struct gmbus_pin gmbus_pins_bxt[] = {
+       [GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB },
+       [GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC },
+       [GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD },
+ };
+ 
+ /* pin is expected to be valid */
+ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
+                                            unsigned int pin)
+ {
+       if (IS_BROXTON(dev_priv))
+               return &gmbus_pins_bxt[pin];
+       else
+               return &gmbus_pins[pin];
+ }
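+ 
+ /* Check that @pin names a gmbus pin pair that exists on this platform. */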
+ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+                             unsigned int pin)
+ {
+       unsigned int size;
+       if (IS_BROXTON(dev_priv))
+               size = ARRAY_SIZE(gmbus_pins_bxt);
+       else
+               size = ARRAY_SIZE(gmbus_pins);
+       return pin < size && get_gmbus_pin(dev_priv, pin)->reg;
+ }
+ 
  /* Intel GPIO access functions */
  
  #define I2C_RISEFALL_TIME 10
@@@ -182,15 -212,15 +212,15 @@@ intel_gpio_post_xfer(struct i2c_adapte
  }
  
  static void
- intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
+ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
  {
        struct drm_i915_private *dev_priv = bus->dev_priv;
        struct i2c_algo_bit_data *algo;
  
        algo = &bus->bit_algo;
  
-       /* -1 to map pin pair to gmbus index */
-       bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
+       bus->gpio_reg = dev_priv->gpio_mmio_base +
+               get_gmbus_pin(dev_priv, pin)->reg;
  
        bus->adapter.algo_data = algo;
        algo->setsda = set_data;
@@@ -270,17 -300,18 +300,17 @@@ gmbus_wait_idle(struct drm_i915_privat
  }
  
  static int
 -gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
 -              u32 gmbus1_index)
 +gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
 +                    unsigned short addr, u8 *buf, unsigned int len,
 +                    u32 gmbus1_index)
  {
        int reg_offset = dev_priv->gpio_mmio_base;
 -      u16 len = msg->len;
 -      u8 *buf = msg->buf;
  
        I915_WRITE(GMBUS1 + reg_offset,
                   gmbus1_index |
                   GMBUS_CYCLE_WAIT |
                   (len << GMBUS_BYTE_COUNT_SHIFT) |
 -                 (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
 +                 (addr << GMBUS_SLAVE_ADDR_SHIFT) |
                   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
        while (len) {
                int ret;
  }
  
  static int
 -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
 +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
 +              u32 gmbus1_index)
  {
 -      int reg_offset = dev_priv->gpio_mmio_base;
 -      u16 len = msg->len;
        u8 *buf = msg->buf;
 +      unsigned int rx_size = msg->len;
 +      unsigned int len;
 +      int ret;
 +
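 +      /*
 +       * The hardware moves at most GMBUS_BYTE_COUNT_MAX bytes per cycle,
 +       * so split large reads into chunks.
 +       */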
 +      do {
 +              len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
 +
 +              ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
 +                                          buf, len, gmbus1_index);
 +              if (ret)
 +                      return ret;
 +
 +              rx_size -= len;
 +              buf += len;
 +      } while (rx_size != 0);
 +
 +      return 0;
 +}
 +
 +static int
 +gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
 +                     unsigned short addr, u8 *buf, unsigned int len)
 +{
 +      int reg_offset = dev_priv->gpio_mmio_base;
 +      unsigned int chunk_size = len;
        u32 val, loop;
  
        val = loop = 0;
        I915_WRITE(GMBUS3 + reg_offset, val);
        I915_WRITE(GMBUS1 + reg_offset,
                   GMBUS_CYCLE_WAIT |
 -                 (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
 -                 (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
 +                 (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
 +                 (addr << GMBUS_SLAVE_ADDR_SHIFT) |
                   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
        while (len) {
                int ret;
                if (ret)
                        return ret;
        }
 +
 +      return 0;
 +}
 +
 +static int
 +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
 +{
 +      u8 *buf = msg->buf;
 +      unsigned int tx_size = msg->len;
 +      unsigned int len;
 +      int ret;
 +
 +      do {
 +              len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
 +
 +              ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
 +              if (ret)
 +                      return ret;
 +
 +              buf += len;
 +              tx_size -= len;
 +      } while (tx_size != 0);
 +
        return 0;
  }
  
@@@ -563,7 -547,9 +593,9 @@@ static const struct i2c_algorithm gmbus
  int intel_setup_gmbus(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret, i;
+       struct intel_gmbus *bus;
+       unsigned int pin;
+       int ret;
  
        if (HAS_PCH_NOP(dev))
                return 0;
        mutex_init(&dev_priv->gmbus_mutex);
        init_waitqueue_head(&dev_priv->gmbus_wait_queue);
  
-       for (i = 0; i < GMBUS_NUM_PORTS; i++) {
-               struct intel_gmbus *bus = &dev_priv->gmbus[i];
-               u32 port = i + 1; /* +1 to map gmbus index to pin pair */
+       for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+               if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+                       continue;
+
+               bus = &dev_priv->gmbus[pin];
  
                bus->adapter.owner = THIS_MODULE;
                bus->adapter.class = I2C_CLASS_DDC;
                snprintf(bus->adapter.name,
                         sizeof(bus->adapter.name),
                         "i915 gmbus %s",
-                        gmbus_ports[i].name);
+                        get_gmbus_pin(dev_priv, pin)->name);
  
                bus->adapter.dev.parent = &dev->pdev->dev;
                bus->dev_priv = dev_priv;
                bus->adapter.algo = &gmbus_algorithm;
  
                /* By default use a conservative clock rate */
-               bus->reg0 = port | GMBUS_RATE_100KHZ;
+               bus->reg0 = pin | GMBUS_RATE_100KHZ;
  
                /* gmbus seems to be broken on i830 */
                if (IS_I830(dev))
                        bus->force_bit = 1;
  
-               intel_gpio_setup(bus, port);
+               intel_gpio_setup(bus, pin);
  
                ret = i2c_add_adapter(&bus->adapter);
                if (ret)
        return 0;
  
  err:
-       while (--i) {
-               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+       while (--pin) {
+               if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+                       continue;
+
+               bus = &dev_priv->gmbus[pin];
                i2c_del_adapter(&bus->adapter);
        }
        return ret;
  }
  
  struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
-                                           unsigned port)
+                                           unsigned int pin)
  {
-       WARN_ON(!intel_gmbus_is_port_valid(port));
-       /* -1 to map pin pair to gmbus index */
-       return (intel_gmbus_is_port_valid(port)) ?
-               &dev_priv->gmbus[port - 1].adapter : NULL;
+       if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)))
+               return NULL;
+       return &dev_priv->gmbus[pin].adapter;
  }
  
  void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
@@@ -648,10 -639,14 +685,14 @@@ void intel_gmbus_force_bit(struct i2c_a
  void intel_teardown_gmbus(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
+       struct intel_gmbus *bus;
+       unsigned int pin;
 
-       for (i = 0; i < GMBUS_NUM_PORTS; i++) {
-               struct intel_gmbus *bus = &dev_priv->gmbus[i];
+       for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+               if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+                       continue;
+
+               bus = &dev_priv->gmbus[pin];
                i2c_del_adapter(&bus->adapter);
        }
  }
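
[Editor's sketch: with pin numbers now used directly as table indices, the setup and teardown loops walk the whole gmbus[] array and skip holes instead of remapping with +1/-1. A standalone version of that sparse-table iteration; the pin->name map here is hypothetical, the real table lives in intel_i2c.c.]

#include <stdio.h>

#define NUM_PINS 7

static const char * const pin_name[NUM_PINS] = {
        [1] = "ssc", [2] = "vga", [3] = "panel",
        [4] = "dpc", [5] = "dpb", [6] = "dpd",
};

static int pin_valid(unsigned int pin)
{
        return pin < NUM_PINS && pin_name[pin];
}

int main(void)
{
        unsigned int pin;

        for (pin = 0; pin < NUM_PINS; pin++) {
                if (!pin_valid(pin))
                        continue;

                printf("gmbus pin %u (%s)\n", pin, pin_name[pin]);
        }
        return 0;
}
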
diff --combined drivers/gpu/drm/i915/intel_lrc.c
  #define GEN8_CTX_FORCE_RESTORE (1<<2)
  #define GEN8_CTX_L3LLC_COHERENT (1<<5)
  #define GEN8_CTX_PRIVILEGE (1<<8)
+ #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
+       const u64 _addr = test_bit(n, ppgtt->pdp.used_pdpes) ? \
+               ppgtt->pdp.page_directory[n]->daddr : \
+               ppgtt->scratch_pd->daddr; \
+       reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
+       reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
+ }
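
[Editor's sketch: ASSIGN_CTX_PDP writes one page-directory pointer into the context image as two 32-bit dwords, falling back to the scratch page directory for unallocated entries. The 64-bit split it performs, with the kernel's upper/lower_32_bits() helpers re-created locally; the address value is hypothetical.]

#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)(v & 0xffffffff); }

int main(void)
{
        uint64_t daddr = 0x0000000123456000ULL; /* hypothetical PD address */

        printf("UDW=0x%08x LDW=0x%08x\n",
               (unsigned)upper_32_bits(daddr), (unsigned)lower_32_bits(daddr));
        return 0;
}
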
  enum {
        ADVANCED_CONTEXT = 0,
        LEGACY_CONTEXT,
@@@ -265,7 -274,8 +274,8 @@@ static uint64_t execlists_ctx_descripto
  
        desc = GEN8_CTX_VALID;
        desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
-       desc |= GEN8_CTX_L3LLC_COHERENT;
+       if (IS_GEN8(ctx_obj->base.dev))
+               desc |= GEN8_CTX_L3LLC_COHERENT;
        desc |= GEN8_CTX_PRIVILEGE;
        desc |= lrca;
        desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
@@@ -305,21 -315,24 +315,24 @@@ static void execlists_elsp_write(struc
        desc[3] = (u32)(temp >> 32);
        desc[2] = (u32)temp;
  
-       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-       I915_WRITE(RING_ELSP(ring), desc[1]);
-       I915_WRITE(RING_ELSP(ring), desc[0]);
-       I915_WRITE(RING_ELSP(ring), desc[3]);
+       spin_lock(&dev_priv->uncore.lock);
+       intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+       I915_WRITE_FW(RING_ELSP(ring), desc[1]);
+       I915_WRITE_FW(RING_ELSP(ring), desc[0]);
+       I915_WRITE_FW(RING_ELSP(ring), desc[3]);
  
        /* The context is automatically loaded after the following */
-       I915_WRITE(RING_ELSP(ring), desc[2]);
+       I915_WRITE_FW(RING_ELSP(ring), desc[2]);
  
        /* ELSP is a wo register, so use another nearby reg for posting instead */
-       POSTING_READ(RING_EXECLIST_STATUS(ring));
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
+       intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+       spin_unlock(&dev_priv->uncore.lock);
  }
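
[Editor's sketch: ELSP submission now takes uncore.lock once and uses the __locked forcewake and _FW register variants, rather than paying for a lock/unlock on every MMIO access. The shape of that pattern, with a pthread mutex standing in for dev_priv->uncore.lock; build with -pthread.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uncore_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold uncore_lock, like I915_WRITE_FW. */
static void write_fw(const char *reg, unsigned int val)
{
        printf("%s <- 0x%x\n", reg, val);
}

int main(void)
{
        unsigned int desc[4] = { 0x11, 0x22, 0x33, 0x44 };

        pthread_mutex_lock(&uncore_lock);
        /* forcewake get (__locked variant) would go here */
        write_fw("ELSP", desc[1]);
        write_fw("ELSP", desc[0]);
        write_fw("ELSP", desc[3]);
        write_fw("ELSP", desc[2]);      /* context loads after this one */
        /* forcewake put (__locked variant) would go here */
        pthread_mutex_unlock(&uncore_lock);

        return 0;
}
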
  
  static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
                                    struct drm_i915_gem_object *ring_obj,
+                                   struct i915_hw_ppgtt *ppgtt,
                                    u32 tail)
  {
        struct page *page;
        reg_state[CTX_RING_TAIL+1] = tail;
        reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
  
+       /* True PPGTT with dynamic page allocation: update PDP registers and
+        * point the unallocated PDPs to the scratch page
+        */
+       if (ppgtt) {
+               ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+               ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+               ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+               ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+       }
        kunmap_atomic(reg_state);
  
        return 0;
@@@ -349,7 -372,7 +372,7 @@@ static void execlists_submit_contexts(s
        WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
        WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
  
-       execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
+       execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
  
        if (to1) {
                ringbuf1 = to1->engine[ring->id].ringbuf;
                WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
                WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
  
-               execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
+               execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
        }
  
        execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
@@@ -393,26 -416,6 +416,26 @@@ static void execlists_context_unqueue(s
                }
        }
  
 +      if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
 +              /*
 +               * WaIdleLiteRestore: make sure we never cause a lite
 +               * restore with HEAD==TAIL
 +               */
 +              if (req0 && req0->elsp_submitted) {
 +                      /*
 +                      * Apply the WA NOOPs to prevent ring:HEAD == req:TAIL
 +                       * as we resubmit the request. See gen8_emit_request()
 +                       * for where we prepare the padding after the end of the
 +                       * request.
 +                       */
 +                      struct intel_ringbuffer *ringbuf;
 +
 +                      ringbuf = req0->ctx->engine[ring->id].ringbuf;
 +                      req0->tail += 8;
 +                      req0->tail &= ringbuf->size - 1;
 +              }
 +      }
 +
        WARN_ON(req1 && req1->elsp_submitted);
  
        execlists_submit_contexts(ring, req0->ctx, req0->tail,
@@@ -520,8 -523,6 +543,6 @@@ static int execlists_context_queue(stru
                                   struct drm_i915_gem_request *request)
  {
        struct drm_i915_gem_request *cursor;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       unsigned long flags;
        int num_elements = 0;
  
        if (to != ring->default_context)
                request->ring = ring;
                request->ctx = to;
                kref_init(&request->ref);
-               request->uniq = dev_priv->request_uniq++;
                i915_gem_context_reference(request->ctx);
        } else {
                i915_gem_request_reference(request);
        }
        request->tail = tail;
  
-       intel_runtime_pm_get(dev_priv);
-       spin_lock_irqsave(&ring->execlist_lock, flags);
+       spin_lock_irq(&ring->execlist_lock);
  
        list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
                if (++num_elements > 2)
        if (num_elements == 0)
                execlists_context_unqueue(ring);
  
-       spin_unlock_irqrestore(&ring->execlist_lock, flags);
+       spin_unlock_irq(&ring->execlist_lock);
  
        return 0;
  }
@@@ -631,6 -629,173 +649,173 @@@ static int execlists_move_to_gpu(struc
        return logical_ring_invalidate_all_caches(ringbuf, ctx);
  }
  
+ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
+                                           struct intel_context *ctx)
+ {
+       int ret;
+
+       if (ctx != request->ring->default_context) {
+               ret = intel_lr_context_pin(request->ring, ctx);
+               if (ret)
+                       return ret;
+       }
+
+       request->ringbuf = ctx->engine[request->ring->id].ringbuf;
+       request->ctx     = ctx;
+       i915_gem_context_reference(request->ctx);
+
+       return 0;
+ }
+
+ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+                                      struct intel_context *ctx,
+                                      int bytes)
+ {
+       struct intel_engine_cs *ring = ringbuf->ring;
+       struct drm_i915_gem_request *request;
+       int ret, new_space;
+
+       if (intel_ring_space(ringbuf) >= bytes)
+               return 0;
+
+       list_for_each_entry(request, &ring->request_list, list) {
+               /*
+                * The request queue is per-engine, so can contain requests
+                * from multiple ringbuffers. Here, we must ignore any that
+                * aren't from the ringbuffer we're considering.
+                */
+               struct intel_context *ctx = request->ctx;
+               if (ctx->engine[ring->id].ringbuf != ringbuf)
+                       continue;
+
+               /* Would completion of this request free enough space? */
+               new_space = __intel_ring_space(request->postfix, ringbuf->tail,
+                                      ringbuf->size);
+               if (new_space >= bytes)
+                       break;
+       }
+
+       if (WARN_ON(&request->list == &ring->request_list))
+               return -ENOSPC;
+
+       ret = i915_wait_request(request);
+       if (ret)
+               return ret;
+
+       i915_gem_retire_requests_ring(ring);
+
+       WARN_ON(intel_ring_space(ringbuf) < new_space);
+
+       return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
+ }
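
[Editor's sketch: logical_ring_wait_for_space() picks the oldest request whose completion frees enough room, judged by __intel_ring_space(). Below is a common circular-buffer free-space formulation, assumed for illustration and not copied from the kernel; the real helper subtracts a larger safety margin than one byte.]

#include <stdio.h>

/* head = GPU read position, tail = CPU write position. */
static int ring_space(int head, int tail, int size)
{
        int space = head - tail;

        if (space <= 0)
                space += size;  /* head is behind tail: writes have wrapped */
        return space - 1;       /* keep head != tail when the ring is full */
}

int main(void)
{
        printf("wrapped case: %d bytes free\n", ring_space(128, 4000, 4096));
        printf("simple case:  %d bytes free\n", ring_space(2048, 1024, 4096));
        return 0;
}
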
+ /*
+  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+  * @ringbuf: Logical Ringbuffer to advance.
+  *
+  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
+  * really happens during submission is that the context and current tail will be placed
+  * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
+  * point, the tail *inside* the context is updated and the ELSP written to.
+  */
+ static void
+ intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+                                     struct intel_context *ctx,
+                                     struct drm_i915_gem_request *request)
+ {
+       struct intel_engine_cs *ring = ringbuf->ring;
+
+       intel_logical_ring_advance(ringbuf);
+
+       if (intel_ring_stopped(ring))
+               return;
+
+       execlists_context_queue(ring, ctx, ringbuf->tail, request);
+ }
+
+ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
+                                   struct intel_context *ctx)
+ {
+       uint32_t __iomem *virt;
+       int rem = ringbuf->size - ringbuf->tail;
+
+       if (ringbuf->space < rem) {
+               int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
+
+               if (ret)
+                       return ret;
+       }
+
+       virt = ringbuf->virtual_start + ringbuf->tail;
+       rem /= 4;
+       while (rem--)
+               iowrite32(MI_NOOP, virt++);
+
+       ringbuf->tail = 0;
+       intel_ring_update_space(ringbuf);
+
+       return 0;
+ }
+
+ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
+                               struct intel_context *ctx, int bytes)
+ {
+       int ret;
+
+       if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
+               ret = logical_ring_wrap_buffer(ringbuf, ctx);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       if (unlikely(ringbuf->space < bytes)) {
+               ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       return 0;
+ }
+
+ /**
+  * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
+  *
+  * @ringbuf: Logical ringbuffer.
+  * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
+  *
+  * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
+  * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
+  * and also preallocates a request (every workload submission is still mediated through
+  * requests, same as it did with legacy ringbuffer submission).
+  *
+  * Return: non-zero if the ringbuffer is not ready to be written to.
+  */
+ static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+                                   struct intel_context *ctx, int num_dwords)
+ {
+       struct intel_engine_cs *ring = ringbuf->ring;
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+                                  dev_priv->mm.interruptible);
+       if (ret)
+               return ret;
+
+       ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
+       if (ret)
+               return ret;
+
+       /* Preallocate the olr before touching the ring */
+       ret = i915_gem_request_alloc(ring, ctx);
+       if (ret)
+               return ret;
+
+       ringbuf->space -= num_dwords * sizeof(uint32_t);
+
+       return 0;
+ }
+
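
[Editor's sketch: intel_logical_ring_begin() turns a dword count into bytes, wraps the buffer if the write would cross the effective end, waits for space, and only then debits ringbuf->space. The accounting as a standalone walk-through; all numbers, including the reserved margin, are made up.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int size = 4096;
        unsigned int effective_size = size - 64; /* usable bytes, hypothetical margin */
        unsigned int tail = 4000, space = 512;
        int num_dwords = 16;
        unsigned int bytes = num_dwords * (unsigned int)sizeof(uint32_t);

        /* 1) Wrap first if the write would cross the effective end. */
        if (tail + bytes > effective_size) {
                printf("pad %u bytes with MI_NOOP, tail -> 0\n", size - tail);
                tail = 0;
        }

        /* 2) Wait (here: just report) until enough space is free. */
        if (space < bytes)
                printf("would wait for an old request to retire\n");

        /* 3) Debit the reservation up front. */
        space -= bytes;
        printf("reserved %u bytes: tail=%u space=%u\n", bytes, tail, space);
        return 0;
}
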
  /**
   * execlists_submission() - submit a batchbuffer for execution, Execlists style
   * @dev: DRM device.
@@@ -742,8 -907,6 +927,6 @@@ int intel_execlists_submission(struct d
  void intel_execlists_retire_requests(struct intel_engine_cs *ring)
  {
        struct drm_i915_gem_request *req, *tmp;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       unsigned long flags;
        struct list_head retired_list;
  
        WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
                return;
  
        INIT_LIST_HEAD(&retired_list);
-       spin_lock_irqsave(&ring->execlist_lock, flags);
+       spin_lock_irq(&ring->execlist_lock);
        list_replace_init(&ring->execlist_retired_req_list, &retired_list);
-       spin_unlock_irqrestore(&ring->execlist_lock, flags);
+       spin_unlock_irq(&ring->execlist_lock);
  
        list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
                struct intel_context *ctx = req->ctx;
                struct drm_i915_gem_object *ctx_obj =
                                ctx->engine[ring->id].state;
 
                if (ctx_obj && (ctx != ring->default_context))
                        intel_lr_context_unpin(ring, ctx);
-               intel_runtime_pm_put(dev_priv);
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
@@@ -807,30 -969,6 +989,6 @@@ int logical_ring_flush_all_caches(struc
        return 0;
  }
  
- /*
-  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
-  * @ringbuf: Logical Ringbuffer to advance.
-  *
-  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
-  * really happens during submission is that the context and current tail will be placed
-  * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
-  * point, the tail *inside* the context is updated and the ELSP written to.
-  */
- static void
- intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
-                                     struct intel_context *ctx,
-                                     struct drm_i915_gem_request *request)
- {
-       struct intel_engine_cs *ring = ringbuf->ring;
-       intel_logical_ring_advance(ringbuf);
-       if (intel_ring_stopped(ring))
-               return;
-       execlists_context_queue(ring, ctx, ringbuf->tail, request);
- }
  static int intel_lr_context_pin(struct intel_engine_cs *ring,
                struct intel_context *ctx)
  {
@@@ -875,219 -1013,6 +1033,6 @@@ void intel_lr_context_unpin(struct inte
        }
  }
  
- static int logical_ring_alloc_request(struct intel_engine_cs *ring,
-                                     struct intel_context *ctx)
- {
-       struct drm_i915_gem_request *request;
-       struct drm_i915_private *dev_private = ring->dev->dev_private;
-       int ret;
-       if (ring->outstanding_lazy_request)
-               return 0;
-       request = kzalloc(sizeof(*request), GFP_KERNEL);
-       if (request == NULL)
-               return -ENOMEM;
-       if (ctx != ring->default_context) {
-               ret = intel_lr_context_pin(ring, ctx);
-               if (ret) {
-                       kfree(request);
-                       return ret;
-               }
-       }
-       kref_init(&request->ref);
-       request->ring = ring;
-       request->uniq = dev_private->request_uniq++;
-       ret = i915_gem_get_seqno(ring->dev, &request->seqno);
-       if (ret) {
-               intel_lr_context_unpin(ring, ctx);
-               kfree(request);
-               return ret;
-       }
-       request->ctx = ctx;
-       i915_gem_context_reference(request->ctx);
-       request->ringbuf = ctx->engine[ring->id].ringbuf;
-       ring->outstanding_lazy_request = request;
-       return 0;
- }
- static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
-                                    int bytes)
- {
-       struct intel_engine_cs *ring = ringbuf->ring;
-       struct drm_i915_gem_request *request;
-       int ret;
-       if (intel_ring_space(ringbuf) >= bytes)
-               return 0;
-       list_for_each_entry(request, &ring->request_list, list) {
-               /*
-                * The request queue is per-engine, so can contain requests
-                * from multiple ringbuffers. Here, we must ignore any that
-                * aren't from the ringbuffer we're considering.
-                */
-               struct intel_context *ctx = request->ctx;
-               if (ctx->engine[ring->id].ringbuf != ringbuf)
-                       continue;
-               /* Would completion of this request free enough space? */
-               if (__intel_ring_space(request->tail, ringbuf->tail,
-                                      ringbuf->size) >= bytes) {
-                       break;
-               }
-       }
-       if (&request->list == &ring->request_list)
-               return -ENOSPC;
-       ret = i915_wait_request(request);
-       if (ret)
-               return ret;
-       i915_gem_retire_requests_ring(ring);
-       return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
- }
- static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
-                                      struct intel_context *ctx,
-                                      int bytes)
- {
-       struct intel_engine_cs *ring = ringbuf->ring;
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long end;
-       int ret;
-       ret = logical_ring_wait_request(ringbuf, bytes);
-       if (ret != -ENOSPC)
-               return ret;
-       /* Force the context submission in case we have been skipping it */
-       intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
-       /* With GEM the hangcheck timer should kick us out of the loop,
-        * leaving it early runs the risk of corrupting GEM state (due
-        * to running on almost untested codepaths). But on resume
-        * timers don't work yet, so prevent a complete hang in that
-        * case by choosing an insanely large timeout. */
-       end = jiffies + 60 * HZ;
-       ret = 0;
-       do {
-               if (intel_ring_space(ringbuf) >= bytes)
-                       break;
-               msleep(1);
-               if (dev_priv->mm.interruptible && signal_pending(current)) {
-                       ret = -ERESTARTSYS;
-                       break;
-               }
-               ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-                                          dev_priv->mm.interruptible);
-               if (ret)
-                       break;
-               if (time_after(jiffies, end)) {
-                       ret = -EBUSY;
-                       break;
-               }
-       } while (1);
-       return ret;
- }
- static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
-                                   struct intel_context *ctx)
- {
-       uint32_t __iomem *virt;
-       int rem = ringbuf->size - ringbuf->tail;
-       if (ringbuf->space < rem) {
-               int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
-               if (ret)
-                       return ret;
-       }
-       virt = ringbuf->virtual_start + ringbuf->tail;
-       rem /= 4;
-       while (rem--)
-               iowrite32(MI_NOOP, virt++);
-       ringbuf->tail = 0;
-       intel_ring_update_space(ringbuf);
-       return 0;
- }
- static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
-                               struct intel_context *ctx, int bytes)
- {
-       int ret;
-       if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-               ret = logical_ring_wrap_buffer(ringbuf, ctx);
-               if (unlikely(ret))
-                       return ret;
-       }
-       if (unlikely(ringbuf->space < bytes)) {
-               ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
-               if (unlikely(ret))
-                       return ret;
-       }
-       return 0;
- }
- /**
-  * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
-  *
-  * @ringbuf: Logical ringbuffer.
-  * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
-  *
-  * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
-  * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
-  * and also preallocates a request (every workload submission is still mediated through
-  * requests, same as it did with legacy ringbuffer submission).
-  *
-  * Return: non-zero if the ringbuffer is not ready to be written to.
-  */
- int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
-                            struct intel_context *ctx, int num_dwords)
- {
-       struct intel_engine_cs *ring = ringbuf->ring;
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-                                  dev_priv->mm.interruptible);
-       if (ret)
-               return ret;
-       ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
-       if (ret)
-               return ret;
-       /* Preallocate the olr before touching the ring */
-       ret = logical_ring_alloc_request(ring, ctx);
-       if (ret)
-               return ret;
-       ringbuf->space -= num_dwords * sizeof(uint32_t);
-       return 0;
- }
  static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
                                               struct intel_context *ctx)
  {
@@@ -1282,6 -1207,7 +1227,7 @@@ static int gen8_emit_flush_render(struc
  {
        struct intel_engine_cs *ring = ringbuf->ring;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       bool vf_flush_wa;
        u32 flags = 0;
        int ret;
  
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
        }
  
-       ret = intel_logical_ring_begin(ringbuf, ctx, 6);
+       /*
+        * On GEN9+, before VF_CACHE_INVALIDATE we need to emit a NULL pipe
+        * control.
+        */
+       vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
+                     flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
+       ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6);
        if (ret)
                return ret;
  
+       if (vf_flush_wa) {
+               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+       }
+
        intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf, flags);
        intel_logical_ring_emit(ringbuf, scratch_addr);
@@@ -1335,12 -1277,7 +1297,12 @@@ static int gen8_emit_request(struct int
        u32 cmd;
        int ret;
  
 -      ret = intel_logical_ring_begin(ringbuf, request->ctx, 6);
 +      /*
 +       * Reserve space for 2 NOOPs at the end of each request to be
 +       * used as a workaround for not being allowed to do lite
 +       * restore with HEAD==TAIL (WaIdleLiteRestore).
 +       */
 +      ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
        if (ret)
                return ret;
  
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
  
 +      /*
 +       * Here we add two extra NOOPs as padding to avoid
 +       * lite restore of a context with HEAD==TAIL.
 +       */
 +      intel_logical_ring_emit(ringbuf, MI_NOOP);
 +      intel_logical_ring_emit(ringbuf, MI_NOOP);
 +      intel_logical_ring_advance(ringbuf);
 +
        return 0;
  }
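
[Editor's sketch: the WaIdleLiteRestore handling pairs the two MI_NOOPs emitted after each request with the tail bump in execlists_context_unqueue(); on resubmission the tail steps over the padding and wraps with a power-of-two mask. The arithmetic in isolation, with illustrative values.]

#include <stdio.h>

int main(void)
{
        unsigned int size = 4096;  /* ring size, power of two */
        unsigned int tail = 4092;  /* request ended near the ring's end */

        tail += 8;                 /* step over the two padding MI_NOOPs */
        tail &= size - 1;          /* cheap modulo: 4100 mod 4096 == 4 */

        printf("resubmitted tail = %u\n", tail);
        return 0;
}
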
  
@@@ -1437,6 -1366,7 +1399,7 @@@ void intel_logical_ring_cleanup(struct 
                ring->cleanup(ring);
  
        i915_cmd_parser_fini_ring(ring);
+       i915_gem_batch_pool_fini(&ring->batch_pool);
  
        if (ring->status_page.obj) {
                kunmap(sg_page(ring->status_page.obj->pages->sgl));
@@@ -1454,6 -1384,7 +1417,7 @@@ static int logical_ring_init(struct drm
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
+       i915_gem_batch_pool_init(dev, &ring->batch_pool);
        init_waitqueue_head(&ring->irq_queue);
  
        INIT_LIST_HEAD(&ring->execlist_queue);
@@@ -1806,14 -1737,14 +1770,14 @@@ populate_lr_context(struct intel_contex
        reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
        reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
        reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
-       reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[3]->daddr);
-       reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[3]->daddr);
-       reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[2]->daddr);
-       reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[2]->daddr);
-       reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[1]->daddr);
-       reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[1]->daddr);
-       reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[0]->daddr);
-       reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[0]->daddr);
+       /* With dynamic page allocation, PDPs may not be allocated at this point,
+        * so point the unallocated PDPs to the scratch page.
+        */
+       ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+       ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+       ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+       ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
        if (ring->id == RCS) {
                reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
                reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
@@@ -1930,7 -1861,7 +1894,7 @@@ int intel_lr_context_deferred_create(st
  
        context_size = round_up(get_lr_context_size(ring), 4096);
  
-       ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
+       ctx_obj = i915_gem_alloc_object(dev, context_size);
        if (IS_ERR(ctx_obj)) {
                ret = PTR_ERR(ctx_obj);
                DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
diff --combined drivers/gpu/drm/i915/intel_uncore.c
@@@ -360,14 -360,6 +360,14 @@@ static void __intel_uncore_early_saniti
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));
  
 +      /* WaDisableShadowRegForCpd:chv */
 +      if (IS_CHERRYVIEW(dev)) {
 +              __raw_i915_write32(dev_priv, GTFIFOCTL,
 +                                 __raw_i915_read32(dev_priv, GTFIFOCTL) |
 +                                 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
 +                                 GT_FIFO_CTL_RC6_POLICY_STALL);
 +      }
 +
        intel_uncore_forcewake_reset(dev, restore_forcewake);
  }
  
@@@ -383,6 -375,26 +383,26 @@@ void intel_uncore_sanitize(struct drm_d
        intel_disable_gt_powersave(dev);
  }
  
+ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+                                        enum forcewake_domains fw_domains)
+ {
+       struct intel_uncore_forcewake_domain *domain;
+       enum forcewake_domain_id id;
+
+       if (!dev_priv->uncore.funcs.force_wake_get)
+               return;
+
+       fw_domains &= dev_priv->uncore.fw_domains;
+
+       for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+               if (domain->wake_count++)
+                       fw_domains &= ~(1 << id);
+       }
+
+       if (fw_domains)
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ }
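
[Editor's sketch: __intel_uncore_forcewake_get() keeps a per-domain wake count and only touches the hardware for domains transitioning from idle to awake. That reference-counting filter, reduced to two hypothetical domains.]

#include <stdio.h>

#define FW_RENDER (1 << 0)
#define FW_MEDIA  (1 << 1)

static unsigned int wake_count[2];

static void forcewake_get(unsigned int fw_domains)
{
        unsigned int id;

        for (id = 0; id < 2; id++) {
                if (!(fw_domains & (1 << id)))
                        continue;
                if (wake_count[id]++)            /* already awake */
                        fw_domains &= ~(1 << id);
        }
        if (fw_domains)
                printf("hw wake requested for mask 0x%x\n", fw_domains);
        else
                printf("no hw access needed\n");
}

int main(void)
{
        forcewake_get(FW_RENDER | FW_MEDIA); /* wakes both domains */
        forcewake_get(FW_RENDER);            /* refcount only */
        return 0;
}
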
  /**
   * intel_uncore_forcewake_get - grab forcewake domain references
   * @dev_priv: i915 device instance
@@@ -400,41 -412,39 +420,39 @@@ void intel_uncore_forcewake_get(struct 
                                enum forcewake_domains fw_domains)
  {
        unsigned long irqflags;
-       struct intel_uncore_forcewake_domain *domain;
-       enum forcewake_domain_id id;
  
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;
  
        WARN_ON(dev_priv->pm.suspended);
  
-       fw_domains &= dev_priv->uncore.fw_domains;
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-       for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
-               if (domain->wake_count++)
-                       fw_domains &= ~(1 << id);
-       }
-       if (fw_domains)
-               dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+       __intel_uncore_forcewake_get(dev_priv, fw_domains);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
  /**
-  * intel_uncore_forcewake_put - release a forcewake domain reference
+  * intel_uncore_forcewake_get__locked - grab forcewake domain references
   * @dev_priv: i915 device instance
-  * @fw_domains: forcewake domains to put references
+  * @fw_domains: forcewake domains to get reference on
   *
-  * This function drops the device-level forcewakes for specified
-  * domains obtained by intel_uncore_forcewake_get().
+  * See intel_uncore_forcewake_get(). This variant places the onus
+  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
   */
- void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
-                               enum forcewake_domains fw_domains)
+ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+                                       enum forcewake_domains fw_domains)
+ {
+       assert_spin_locked(&dev_priv->uncore.lock);
+
+       if (!dev_priv->uncore.funcs.force_wake_get)
+               return;
+
+       __intel_uncore_forcewake_get(dev_priv, fw_domains);
+ }
+
+ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+                                        enum forcewake_domains fw_domains)
  {
-       unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;
  
  
        fw_domains &= dev_priv->uncore.fw_domains;
  
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (WARN_ON(domain->wake_count == 0))
                        continue;
                domain->wake_count++;
                fw_domain_arm_timer(domain);
        }
+ }
  
+ /**
+  * intel_uncore_forcewake_put - release a forcewake domain reference
+  * @dev_priv: i915 device instance
+  * @fw_domains: forcewake domains to put references
+  *
+  * This function drops the device-level forcewakes for specified
+  * domains obtained by intel_uncore_forcewake_get().
+  */
+ void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+                               enum forcewake_domains fw_domains)
+ {
+       unsigned long irqflags;
+
+       if (!dev_priv->uncore.funcs.force_wake_put)
+               return;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       __intel_uncore_forcewake_put(dev_priv, fw_domains);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
+ /**
+  * intel_uncore_forcewake_put__locked - release forcewake domain references
+  * @dev_priv: i915 device instance
+  * @fw_domains: forcewake domains to put references
+  *
+  * See intel_uncore_forcewake_put(). This variant places the onus
+  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
+  */
+ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+                                       enum forcewake_domains fw_domains)
+ {
+       assert_spin_locked(&dev_priv->uncore.lock);
+
+       if (!dev_priv->uncore.funcs.force_wake_put)
+               return;
+
+       __intel_uncore_forcewake_put(dev_priv, fw_domains);
+ }
+
  void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
  {
        struct intel_uncore_forcewake_domain *domain;
diff --combined include/drm/drm_crtc.h
@@@ -466,7 -466,7 +466,7 @@@ struct drm_crtc 
        int framedur_ns, linedur_ns, pixeldur_ns;
  
        /* if you are using the helper */
 -      void *helper_private;
 +      const void *helper_private;
  
        struct drm_object_properties properties;
  
@@@ -596,7 -596,7 +596,7 @@@ struct drm_encoder 
        struct drm_crtc *crtc;
        struct drm_bridge *bridge;
        const struct drm_encoder_funcs *funcs;
 -      void *helper_private;
 +      const void *helper_private;
  };
  
  /* should we poll this connector for connects and disconnects */
@@@ -700,7 -700,7 +700,7 @@@ struct drm_connector 
        /* requested DPMS state */
        int dpms;
  
 -      void *helper_private;
 +      const void *helper_private;
  
        /* forced on connector */
        struct drm_cmdline_mode cmdline_mode;
@@@ -863,7 -863,7 +863,7 @@@ struct drm_plane 
  
        enum drm_plane_type type;
  
 -      void *helper_private;
 +      const void *helper_private;
  
        struct drm_plane_state *state;
  };
@@@ -974,7 -974,7 +974,7 @@@ struct drm_mode_set 
   * struct drm_mode_config_funcs - basic driver provided mode setting functions
   * @fb_create: create a new framebuffer object
   * @output_poll_changed: function to handle output configuration changes
 - * @atomic_check: check whether a give atomic state update is possible
 + * @atomic_check: check whether a given atomic state update is possible
   * @atomic_commit: commit an atomic state update previously verified with
   *    atomic_check()
   *
@@@ -1263,6 -1263,7 +1263,7 @@@ extern int drm_plane_init(struct drm_de
                          bool is_primary);
  extern void drm_plane_cleanup(struct drm_plane *plane);
  extern unsigned int drm_plane_index(struct drm_plane *plane);
+ extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
  extern void drm_plane_force_disable(struct drm_plane *plane);
  extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
                                        u32 format);