Merge tag 'drm-intel-next-2014-10-03-no-ppgtt' of git://anongit.freedesktop.org/drm...
author     Dave Airlie <airlied@redhat.com>
Tue, 28 Oct 2014 02:37:58 +0000 (12:37 +1000)
committer  Dave Airlie <airlied@redhat.com>
Tue, 28 Oct 2014 02:37:58 +0000 (12:37 +1000)
Ok, new attempt, this time around with full ppgtt disabled again.

drm-intel-next-2014-10-03:
- first batch of skl stage 1 enabling
- fixes from Rodrigo to the PSR, fbc and sink crc code
- kerneldoc for the frontbuffer tracking code, runtime pm code and the basic
  interrupt enable/disable functions
- smaller stuff all over
drm-intel-next-2014-09-19:
- bunch more i830M fixes from Ville
- full ppgtt now again enabled by default
- more ppgtt fixes from Michel Thierry and Chris Wilson
- plane config work from Gustavo Padovan
- spinlock clarifications
- piles of smaller improvements all over, as usual

* tag 'drm-intel-next-2014-10-03-no-ppgtt' of git://anongit.freedesktop.org/drm-intel: (114 commits)
  Revert "drm/i915: Enable full PPGTT on gen7"
  drm/i915: Update DRIVER_DATE to 20141003
  drm/i915: Remove the duplicated logic between the two shrink phases
  drm/i915: kerneldoc for interrupt enable/disable functions
  drm/i915: Use dev_priv instead of dev in irq setup functions
  drm/i915: s/pm._irqs_disabled/pm.irqs_enabled/
  drm/i915: Clear TX FIFO reset master override bits on chv
  drm/i915: Make sure hardware uses the correct swing margin/deemph bits on chv
  drm/i915: make sink_crc return -EIO on aux read/write failure
  drm/i915: Constify send buffer for intel_dp_aux_ch
  drm/i915: De-magic the PSR AUX message
  drm/i915: Reinstate error level message for non-simulated gpu hangs
  drm/i915: Kerneldoc for intel_runtime_pm.c
  drm/i915: Call runtime_pm_disable directly
  drm/i915: Move intel_display_set_init_power to intel_runtime_pm.c
  drm/i915: Bikeshed rpm functions name a bit.
  drm/i915: Extract intel_runtime_pm.c
  drm/i915: Remove intel_modeset_suspend_hw
  drm/i915: spelling fixes for frontbuffer tracking kerneldoc
  drm/i915: Tighten frontbuffer tracking around flips
  ...

34 files changed:
Documentation/DocBook/drm.tmpl
arch/x86/kernel/early-quirks.c
drivers/char/agp/intel-gtt.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_frontbuffer.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_runtime_pm.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
include/drm/drm_dp_helper.h
include/drm/i915_pciids.h

index be35bc3..d7cfc98 100644
@@ -3787,6 +3787,26 @@ int num_ioctls;</synopsis>
       blocks. This excludes a set of SoC platforms with an SGX rendering unit,
       those have basic support through the gma500 drm driver.
     </para>
+    <sect1>
+      <title>Core Driver Infrastructure</title>
+      <para>
+       This section covers core driver infrastructure used by both the display
+       and the GEM parts of the driver.
+      </para>
+      <sect2>
+        <title>Runtime Power Management</title>
+!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
+!Idrivers/gpu/drm/i915/intel_runtime_pm.c
+      </sect2>
+      <sect2>
+        <title>Interrupt Handling</title>
+!Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
+!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
+      </sect2>
+    </sect1>
     <sect1>
       <title>Display Hardware Handling</title>
       <para>
@@ -3803,6 +3823,13 @@ int num_ioctls;</synopsis>
           configuration change.
         </para>
       </sect2>
+      <sect2>
+        <title>Frontbuffer Tracking</title>
+!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
+!Idrivers/gpu/drm/i915/intel_frontbuffer.c
+!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
+!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
+      </sect2>
       <sect2>
         <title>Plane Configuration</title>
         <para>
@@ -3932,5 +3959,6 @@ int num_ioctls;</synopsis>
       </sect2>
     </sect1>
   </chapter>
+!Cdrivers/gpu/drm/i915/i915_irq.c
 </part>
 </book>
index 2e1a685..fe9f0b7 100644
@@ -455,6 +455,23 @@ struct intel_stolen_funcs {
        u32 (*base)(int num, int slot, int func, size_t size);
 };
 
+static size_t __init gen9_stolen_size(int num, int slot, int func)
+{
+       u16 gmch_ctrl;
+
+       gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+       gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
+       gmch_ctrl &= BDW_GMCH_GMS_MASK;
+
+       if (gmch_ctrl < 0xf0)
+               return gmch_ctrl << 25; /* 32 MB units */
+       else
+               /* 4MB increments starting at 0xf0 for 4MB */
+               return (gmch_ctrl - 0xf0 + 1) << 22;
+}
+
+typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
 static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
        .base = i830_stolen_base,
        .size = i830_stolen_size,
@@ -490,6 +507,11 @@ static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
        .size = gen8_stolen_size,
 };
 
+static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
+       .base = intel_stolen_base,
+       .size = gen9_stolen_size,
+};
+
 static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
        .base = intel_stolen_base,
        .size = chv_stolen_size,
@@ -523,6 +545,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
        INTEL_BDW_M_IDS(&gen8_stolen_funcs),
        INTEL_BDW_D_IDS(&gen8_stolen_funcs),
        INTEL_CHV_IDS(&chv_stolen_funcs),
+       INTEL_SKL_IDS(&gen9_stolen_funcs),
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)
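
The gen9 decode above packs two encodings into one field: values below 0xf0 count 32 MB units, values from 0xf0 upwards count 4 MB units. As a standalone sketch of that arithmetic (user-space C; gen9_decode_gms is a hypothetical helper name, not part of the patch):

#include <stdio.h>

/* Hypothetical helper mirroring gen9_stolen_size()/gen9_get_stolen_size()
 * from this series; takes the already shifted and masked GMS field. */
static unsigned long long gen9_decode_gms(unsigned int gms)
{
	if (gms < 0xf0)
		return (unsigned long long)gms << 25;		/* 32 MB units */
	return (unsigned long long)(gms - 0xf0 + 1) << 22;	/* 4 MB units from 0xf0 */
}

int main(void)
{
	/* 0x01 -> 32 MB, 0xf0 -> 4 MB, 0xfe -> 60 MB */
	printf("%llu %llu %llu\n",
	       gen9_decode_gms(0x01) >> 20,
	       gen9_decode_gms(0xf0) >> 20,
	       gen9_decode_gms(0xfe) >> 20);
	return 0;
}
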
index 9a024f8..f333482 100644
@@ -153,7 +153,6 @@ static struct page *i8xx_alloc_pages(void)
                __free_pages(page, 2);
                return NULL;
        }
-       get_page(page);
        atomic_inc(&agp_bridge->current_memory_agp);
        return page;
 }
@@ -164,7 +163,6 @@ static void i8xx_destroy_pages(struct page *page)
                return;
 
        set_pages_wb(page, 4);
-       put_page(page);
        __free_pages(page, 2);
        atomic_dec(&agp_bridge->current_memory_agp);
 }
@@ -300,7 +298,6 @@ static int intel_gtt_setup_scratch_page(void)
        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
-       get_page(page);
        set_pages_uc(page, 1);
 
        if (intel_private.needs_dmar) {
@@ -560,7 +557,6 @@ static void intel_gtt_teardown_scratch_page(void)
        set_pages_wb(intel_private.scratch_page, 1);
        pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       put_page(intel_private.scratch_page);
        __free_page(intel_private.scratch_page);
 }
 
index c1dd485..3a6bce0 100644
@@ -11,7 +11,9 @@ i915-y := i915_drv.o \
          i915_params.o \
           i915_suspend.o \
          i915_sysfs.o \
-         intel_pm.o
+         intel_pm.o \
+         intel_runtime_pm.o
+
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 
@@ -43,6 +45,7 @@ i915-y += intel_renderstate_gen6.o \
 # modesetting core code
 i915-y += intel_bios.o \
          intel_display.o \
+         intel_frontbuffer.o \
          intel_modes.o \
          intel_overlay.o \
          intel_sideband.o \
index 593b657..86b3ae0 100644
@@ -847,12 +847,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
        if (!ring->needs_cmd_parser)
                return false;
 
-       /*
-        * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
-        * disabled. That will cause all of the parser's PPGTT checks to
-        * fail. For now, disable parsing when PPGTT is off.
-        */
-       if (USES_PPGTT(ring->dev))
+       if (!USES_PPGTT(ring->dev))
                return false;
 
        return (i915.enable_cmd_parser == 1);
@@ -888,8 +883,10 @@ static bool check_cmd(const struct intel_engine_cs *ring,
                 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
                 */
                if (reg_addr == OACONTROL) {
-                       if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
+                       if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+                               DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
                                return false;
+                       }
 
                        if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
                                *oacontrol_set = (cmd[2] != 0);
index 063b448..da4036d 100644
@@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
        struct intel_crtc *crtc;
        int ret;
 
@@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                const char plane = plane_name(crtc->plane);
                struct intel_unpin_work *work;
 
-               spin_lock_irqsave(&dev->event_lock, flags);
+               spin_lock_irq(&dev->event_lock);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
@@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
                        }
                }
-               spin_unlock_irqrestore(&dev->event_lock, flags);
+               spin_unlock_irq(&dev->event_lock);
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                }
 
                for_each_pipe(dev_priv, pipe) {
-                       if (!intel_display_power_enabled(dev_priv,
+                       if (!intel_display_power_is_enabled(dev_priv,
                                                POWER_DOMAIN_PIPE(pipe))) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
@@ -1986,7 +1985,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
                           I915_READ(MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           I915_READ(TILECTL));
-               if (IS_GEN8(dev))
+               if (INTEL_INFO(dev)->gen >= 8)
                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
                                   I915_READ(GAMTARBMODE));
                else
index 1403b01..85d14e1 100644
@@ -1338,14 +1338,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_power_domains_init_hw(dev_priv);
 
-       /*
-        * We enable some interrupt sources in our postinstall hooks, so mark
-        * interrupts as enabled _before_ actually enabling them to avoid
-        * special cases in our ordering checks.
-        */
-       dev_priv->pm._irqs_disabled = false;
-
-       ret = drm_irq_install(dev, dev->pdev->irq);
+       ret = intel_irq_install(dev_priv);
        if (ret)
                goto cleanup_gem_stolen;
 
@@ -1370,7 +1363,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
                goto cleanup_gem;
 
        /* Only enable hotplug handling once the fbdev is fully set up. */
-       intel_hpd_init(dev);
+       intel_hpd_init(dev_priv);
 
        /*
         * Some ports require correctly set-up hpd registers for detection to
@@ -1534,7 +1527,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 
        info = (struct intel_device_info *)&dev_priv->info;
 
-       if (IS_VALLEYVIEW(dev))
+       if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
@@ -1614,7 +1607,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
-       spin_lock_init(&dev_priv->backlight_lock);
+       mutex_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        spin_lock_init(&dev_priv->mmio_flip_lock);
@@ -1740,7 +1733,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto out_freewq;
        }
 
-       intel_irq_init(dev);
+       intel_irq_init(dev_priv);
        intel_uncore_sanitize(dev);
 
        /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1798,12 +1791,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (IS_GEN5(dev))
                intel_gpu_ips_init(dev_priv);
 
-       intel_init_runtime_pm(dev_priv);
+       intel_runtime_pm_enable(dev_priv);
 
        return 0;
 
 out_power_well:
-       intel_power_domains_remove(dev_priv);
+       intel_power_domains_fini(dev_priv);
        drm_vblank_cleanup(dev);
 out_gem_unload:
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1846,16 +1839,10 @@ int i915_driver_unload(struct drm_device *dev)
                return ret;
        }
 
-       intel_fini_runtime_pm(dev_priv);
+       intel_power_domains_fini(dev_priv);
 
        intel_gpu_ips_teardown();
 
-       /* The i915.ko module is still not prepared to be loaded when
-        * the power well is not enabled, so just enable it in case
-        * we're going to unload/reload. */
-       intel_display_set_init_power(dev_priv, true);
-       intel_power_domains_remove(dev_priv);
-
        i915_teardown_sysfs(dev);
 
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
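
The i915_load_modeset_init() hunk above folds the "mark interrupts as enabled before the postinstall hooks run" dance into intel_irq_install(). A minimal sketch of that ordering, assuming stand-in names (fake_i915, fake_irq_install) since the real routine lives in i915_irq.c and operates on struct drm_i915_private:

#include <stdio.h>

struct fake_i915 {
	int irqs_enabled;	/* stand-in for dev_priv->pm.irqs_enabled */
};

static int fake_irq_install(struct fake_i915 *i915)
{
	/* The postinstall hooks enable some interrupt sources, so flag
	 * interrupts as enabled _before_ the actual install to avoid
	 * special cases in the ordering checks. */
	i915->irqs_enabled = 1;
	return 0;	/* the real code calls drm_irq_install() here */
}

int main(void)
{
	struct fake_i915 i915 = { 0 };

	if (fake_irq_install(&i915) == 0)
		printf("irqs_enabled=%d\n", i915.irqs_enabled);	/* 1 */
	return 0;
}
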
index 055d5e7..bd7978c 100644
@@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = {
        CURSOR_OFFSETS,
 };
 
+static const struct intel_device_info intel_skylake_info = {
+       .is_preliminary = 1,
+       .is_skylake = 1,
+       .gen = 9, .num_pipes = 3,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       .has_llc = 1,
+       .has_ddi = 1,
+       .has_fbc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       IVB_CURSOR_OFFSETS,
+};
+
 /*
  * Make sure any device matches here are from most specific to most
  * general.  For example, since the Quanta match is based on the subsystem
@@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = {
        INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),   \
        INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
        INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
-       INTEL_CHV_IDS(&intel_cherryview_info)
+       INTEL_CHV_IDS(&intel_cherryview_info),  \
+       INTEL_SKL_IDS(&intel_skylake_info)
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
        INTEL_PCI_IDS,
@@ -461,6 +475,16 @@ void intel_detect_pch(struct drm_device *dev)
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(!IS_ULT(dev));
+                       } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev));
+                               WARN_ON(IS_ULT(dev));
+                       } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev));
+                               WARN_ON(!IS_ULT(dev));
                        } else
                                continue;
 
@@ -575,14 +599,14 @@ static int i915_drm_freeze(struct drm_device *dev)
 
                flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
-               intel_runtime_pm_disable_interrupts(dev);
+               intel_runtime_pm_disable_interrupts(dev_priv);
                intel_hpd_cancel_work(dev_priv);
 
                intel_suspend_encoders(dev_priv);
 
                intel_suspend_gt_powersave(dev);
 
-               intel_modeset_suspend_hw(dev);
+               intel_suspend_hw(dev);
        }
 
        i915_gem_suspend_gtt_mappings(dev);
@@ -680,16 +704,16 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
                }
                mutex_unlock(&dev->struct_mutex);
 
-               intel_runtime_pm_restore_interrupts(dev);
+               /* We need working interrupts for modeset enabling ... */
+               intel_runtime_pm_enable_interrupts(dev_priv);
 
                intel_modeset_init_hw(dev);
 
                {
-                       unsigned long irqflags;
-                       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+                       spin_lock_irq(&dev_priv->irq_lock);
                        if (dev_priv->display.hpd_irq_setup)
                                dev_priv->display.hpd_irq_setup(dev);
-                       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+                       spin_unlock_irq(&dev_priv->irq_lock);
                }
 
                intel_dp_mst_resume(dev);
@@ -703,7 +727,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
                 * bother with the tiny race here where we might lose hotplug
                 * notifications.
                 * */
-               intel_hpd_init(dev);
+               intel_hpd_init(dev_priv);
                /* Config may have changed between suspend and resume */
                drm_helper_hpd_irq_event(dev);
        }
@@ -820,6 +844,9 @@ int i915_reset(struct drm_device *dev)
                }
        }
 
+       if (i915_stop_ring_allow_warn(dev_priv))
+               pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
        if (ret) {
                DRM_ERROR("Failed to reset chip: %i\n", ret);
                mutex_unlock(&dev->struct_mutex);
@@ -1446,12 +1473,12 @@ static int intel_runtime_suspend(struct device *device)
         * intel_mark_idle().
         */
        cancel_work_sync(&dev_priv->rps.work);
-       intel_runtime_pm_disable_interrupts(dev);
+       intel_runtime_pm_disable_interrupts(dev_priv);
 
        ret = intel_suspend_complete(dev_priv);
        if (ret) {
                DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
-               intel_runtime_pm_restore_interrupts(dev);
+               intel_runtime_pm_enable_interrupts(dev_priv);
 
                return ret;
        }
@@ -1511,7 +1538,7 @@ static int intel_runtime_resume(struct device *device)
        i915_gem_init_swizzling(dev);
        gen6_update_ring_freq(dev);
 
-       intel_runtime_pm_restore_interrupts(dev);
+       intel_runtime_pm_enable_interrupts(dev_priv);
        intel_reset_gt_powersave(dev);
 
        if (ret)
index 16a6f6d..9962da2 100644
@@ -55,7 +55,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20140905"
+#define DRIVER_DATE            "20141003"
 
 enum pipe {
        INVALID_PIPE = -1,
@@ -76,6 +76,14 @@ enum transcoder {
 };
 #define transcoder_name(t) ((t) + 'A')
 
+/*
+ * This is the maximum (across all platforms) number of planes (primary +
+ * sprites) that can be active at the same time on one pipe.
+ *
+ * This value doesn't count the cursor plane.
+ */
+#define I915_MAX_PLANES        3
+
 enum plane {
        PLANE_A = 0,
        PLANE_B,
@@ -551,6 +559,7 @@ struct intel_uncore {
        func(is_ivybridge) sep \
        func(is_valleyview) sep \
        func(is_haswell) sep \
+       func(is_skylake) sep \
        func(is_preliminary) sep \
        func(has_fbc) sep \
        func(has_pipe_cxsr) sep \
@@ -663,6 +672,18 @@ struct i915_fbc {
 
        bool false_color;
 
+       /* Tracks whether the HW is actually enabled, not whether the feature is
+        * possible. */
+       bool enabled;
+
+       /* On gen8 some rings cannot perform fbc clean operation so for now
+        * we are doing this on SW with mmio.
+        * This variable works in the opposite information direction
+        * of ring->fbc_dirty telling software on frontbuffer tracking
+        * to perform the cache clean on sw side.
+        */
+       bool need_sw_cache_clean;
+
        struct intel_fbc_work {
                struct delayed_work work;
                struct drm_crtc *crtc;
@@ -704,6 +725,7 @@ enum intel_pch {
        PCH_IBX,        /* Ibexpeak PCH */
        PCH_CPT,        /* Cougarpoint PCH */
        PCH_LPT,        /* Lynxpoint PCH */
+       PCH_SPT,        /* Sunrisepoint PCH */
        PCH_NOP,
 };
 
@@ -1369,7 +1391,7 @@ struct ilk_wm_values {
  *
  * Our driver uses the autosuspend delay feature, which means we'll only really
  * suspend if we stay with zero refcount for a certain amount of time. The
- * default value is currently very conservative (see intel_init_runtime_pm), but
+ * default value is currently very conservative (see intel_runtime_pm_enable), but
  * it can be changed with the standard runtime PM files from sysfs.
  *
  * The irqs_disabled variable becomes true exactly after we disable the IRQs and
@@ -1382,7 +1404,7 @@ struct ilk_wm_values {
  */
 struct i915_runtime_pm {
        bool suspended;
-       bool _irqs_disabled;
+       bool irqs_enabled;
 };
 
 enum intel_pipe_crc_source {
@@ -1509,7 +1531,7 @@ struct drm_i915_private {
        struct intel_overlay *overlay;
 
        /* backlight registers and fields in struct intel_panel */
-       spinlock_t backlight_lock;
+       struct mutex backlight_lock;
 
        /* LVDS info */
        bool no_aux_handshake;
@@ -2073,6 +2095,7 @@ struct drm_i915_cmd_table {
 #define IS_CHERRYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
 #define IS_BROADWELL(dev)      (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
+#define IS_SKYLAKE(dev)        (INTEL_INFO(dev)->is_skylake)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2080,6 +2103,8 @@ struct drm_i915_cmd_table {
                                 ((INTEL_DEVID(dev) & 0xf) == 0x2  || \
                                 (INTEL_DEVID(dev) & 0xf) == 0x6 || \
                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_BDW_GT3(dev)                (IS_BROADWELL(dev) && \
+                                (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)                (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
 #define IS_ULT(dev)            (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
@@ -2103,6 +2128,7 @@ struct drm_i915_cmd_table {
 #define IS_GEN6(dev)   (INTEL_INFO(dev)->gen == 6)
 #define IS_GEN7(dev)   (INTEL_INFO(dev)->gen == 7)
 #define IS_GEN8(dev)   (INTEL_INFO(dev)->gen == 8)
+#define IS_GEN9(dev)   (INTEL_INFO(dev)->gen == 9)
 
 #define RENDER_RING            (1<<RCS)
 #define BSD_RING               (1<<VCS)
@@ -2120,8 +2146,6 @@ struct drm_i915_cmd_table {
 
 #define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
-#define HAS_ALIASING_PPGTT(dev)        (INTEL_INFO(dev)->gen >= 6)
-#define HAS_PPGTT(dev)         (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
 #define USES_PPGTT(dev)                (i915.enable_ppgtt)
 #define USES_FULL_PPGTT(dev)   (i915.enable_ppgtt == 2)
 
@@ -2168,8 +2192,11 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_PPT_DEVICE_ID_TYPE           0x1e00
 #define INTEL_PCH_LPT_DEVICE_ID_TYPE           0x8c00
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE                0x9c00
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE           0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE                0x9D00
 
 #define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
+#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -2262,8 +2289,10 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
 
 void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
                                                        int new_delay);
-extern void intel_irq_init(struct drm_device *dev);
-extern void intel_hpd_init(struct drm_device *dev);
+extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_hpd_init(struct drm_i915_private *dev_priv);
+int intel_irq_install(struct drm_i915_private *dev_priv);
+void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev,
@@ -2793,7 +2822,6 @@ static inline void intel_unregister_dsm_handler(void) { return; }
 
 /* modesetting */
 extern void intel_modeset_init_hw(struct drm_device *dev);
-extern void intel_modeset_suspend_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -2804,7 +2832,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
+extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
index 28f91df..2719c25 100644
@@ -1945,7 +1945,14 @@ unsigned long
 i915_gem_shrink(struct drm_i915_private *dev_priv,
                long target, unsigned flags)
 {
-       const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
+       const struct {
+               struct list_head *list;
+               unsigned int bit;
+       } phases[] = {
+               { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+               { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+               { NULL, 0 },
+       }, *phase;
        unsigned long count = 0;
 
        /*
@@ -1967,48 +1974,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count equals 0.
         */
-       if (flags & I915_SHRINK_UNBOUND) {
+       for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
 
-               INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-                       struct drm_i915_gem_object *obj;
-
-                       obj = list_first_entry(&dev_priv->mm.unbound_list,
-                                              typeof(*obj), global_list);
-                       list_move_tail(&obj->global_list, &still_in_list);
-
-                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                               continue;
-
-                       drm_gem_object_reference(&obj->base);
-
-                       if (i915_gem_object_put_pages(obj) == 0)
-                               count += obj->base.size >> PAGE_SHIFT;
-
-                       drm_gem_object_unreference(&obj->base);
-               }
-               list_splice(&still_in_list, &dev_priv->mm.unbound_list);
-       }
-
-       if (flags & I915_SHRINK_BOUND) {
-               struct list_head still_in_list;
+               if ((flags & phase->bit) == 0)
+                       continue;
 
                INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+               while (count < target && !list_empty(phase->list)) {
                        struct drm_i915_gem_object *obj;
                        struct i915_vma *vma, *v;
 
-                       obj = list_first_entry(&dev_priv->mm.bound_list,
+                       obj = list_first_entry(phase->list,
                                               typeof(*obj), global_list);
                        list_move_tail(&obj->global_list, &still_in_list);
 
-                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                       if (flags & I915_SHRINK_PURGEABLE &&
+                           !i915_gem_object_is_purgeable(obj))
                                continue;
 
                        drm_gem_object_reference(&obj->base);
 
-                       list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                       /* For the unbound phase, this should be a no-op! */
+                       list_for_each_entry_safe(vma, v,
+                                                &obj->vma_list, vma_link)
                                if (i915_vma_unbind(vma))
                                        break;
 
@@ -2017,7 +2006,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
                        drm_gem_object_unreference(&obj->base);
                }
-               list_splice(&still_in_list, &dev_priv->mm.bound_list);
+               list_splice(&still_in_list, phase->list);
        }
 
        return count;
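
The rework above collapses the two copy-pasted unbound/bound loops into a single loop over a phases[] table. A stripped-down sketch of the same table-driven pattern (plain C; the page counters stand in for the object lists the real shrinker walks):

#include <stdio.h>

#define SHRINK_UNBOUND	(1u << 0)
#define SHRINK_BOUND	(1u << 1)

static unsigned long shrink(unsigned int flags,
			    unsigned long *unbound_pages,
			    unsigned long *bound_pages)
{
	const struct {
		unsigned long *pages;
		unsigned int bit;
	} phases[] = {
		{ unbound_pages, SHRINK_UNBOUND },
		{ bound_pages, SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;

	for (phase = phases; phase->pages; phase++) {
		if ((flags & phase->bit) == 0)
			continue;
		/* the real code walks the list and releases objects here */
		count += *phase->pages;
		*phase->pages = 0;
	}
	return count;
}

int main(void)
{
	unsigned long unbound = 10, bound = 20;

	printf("%lu\n", shrink(SHRINK_UNBOUND | SHRINK_BOUND,
			       &unbound, &bound));	/* 30 */
	return 0;
}
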
@@ -3166,6 +3155,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
             obj->stride, obj->tiling_mode);
 
        switch (INTEL_INFO(dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
@@ -3384,46 +3374,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
        return true;
 }
 
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
-       int err = 0;
-
-       list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
-               if (obj->gtt_space == NULL) {
-                       printk(KERN_ERR "object found on GTT list with no space reserved\n");
-                       err++;
-                       continue;
-               }
-
-               if (obj->cache_level != obj->gtt_space->color) {
-                       printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-                              i915_gem_obj_ggtt_offset(obj),
-                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-                              obj->cache_level,
-                              obj->gtt_space->color);
-                       err++;
-                       continue;
-               }
-
-               if (!i915_gem_valid_gtt_space(dev,
-                                             obj->gtt_space,
-                                             obj->cache_level)) {
-                       printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-                              i915_gem_obj_ggtt_offset(obj),
-                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-                              obj->cache_level);
-                       err++;
-                       continue;
-               }
-       }
-
-       WARN_ON(err);
-#endif
-}
-
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -3532,7 +3482,6 @@ search_free:
        vma->bind_vma(vma, obj->cache_level,
                      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
 
-       i915_gem_verify_gtt(dev);
        return vma;
 
 err_remove_node:
@@ -3769,7 +3718,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                                    old_write_domain);
        }
 
-       i915_gem_verify_gtt(dev);
        return 0;
 }
 
@@ -5119,6 +5067,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        return ret;
 }
 
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * @old: current GEM buffer for the frontbuffer slots
+ * @new: new GEM buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits)
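
A minimal sketch of the bookkeeping the kerneldoc above describes — the named frontbuffer bits move from @old to @new — using a reduced stand-in struct (fb_obj) and skipping the locking and consistency checks of the real function:

#include <stdio.h>

struct fb_obj {
	unsigned int frontbuffer_bits;
};

static void track_fb(struct fb_obj *old, struct fb_obj *new,
		     unsigned int frontbuffer_bits)
{
	if (old)
		old->frontbuffer_bits &= ~frontbuffer_bits;	/* slot no longer backed by old */
	if (new)
		new->frontbuffer_bits |= frontbuffer_bits;	/* slot now backed by new */
}

int main(void)
{
	struct fb_obj a = { .frontbuffer_bits = 0x1 };
	struct fb_obj b = { .frontbuffer_bits = 0 };

	track_fb(&a, &b, 0x1);	/* e.g. the primary plane flips from a to b */
	printf("a=%#x b=%#x\n", a.frontbuffer_bits, b.frontbuffer_bits);	/* a=0 b=0x1 */
	return 0;
}
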
index b672b84..ae82ef5 100644
@@ -35,13 +35,21 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 {
-       if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+       bool has_aliasing_ppgtt;
+       bool has_full_ppgtt;
+
+       has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
+       has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+       if (IS_GEN8(dev))
+               has_full_ppgtt = false; /* XXX why? */
+
+       if (enable_ppgtt == 0 || !has_aliasing_ppgtt)
                return 0;
 
        if (enable_ppgtt == 1)
                return 1;
 
-       if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+       if (enable_ppgtt == 2 && has_full_ppgtt)
                return 2;
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -59,7 +67,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
                return 0;
        }
 
-       return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
+       return has_aliasing_ppgtt ? 1 : 0;
 }
 
 
@@ -1092,7 +1100,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 
        if (INTEL_INFO(dev)->gen < 8)
                return gen6_ppgtt_init(ppgtt);
-       else if (IS_GEN8(dev))
+       else if (IS_GEN8(dev) || IS_GEN9(dev))
                return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
        else
                BUG();
@@ -1764,7 +1772,6 @@ static int setup_scratch_page(struct drm_device *dev)
        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return -ENOMEM;
-       get_page(page);
        set_pages_uc(page, 1);
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -1789,7 +1796,6 @@ static void teardown_scratch_page(struct drm_device *dev)
        set_pages_wb(page, 1);
        pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       put_page(page);
        __free_page(page);
 }
 
@@ -1859,6 +1865,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
                return (gmch_ctrl - 0x17 + 9) << 22;
 }
 
+static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
+{
+       gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+       gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
+
+       if (gen9_gmch_ctl < 0xf0)
+               return gen9_gmch_ctl << 25; /* 32 MB units */
+       else
+               /* 4MB increments starting at 0xf0 for 4MB */
+               return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+}
+
 static int ggtt_probe_common(struct drm_device *dev,
                             size_t gtt_size)
 {
@@ -1955,7 +1973,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
 
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
-       if (IS_CHERRYVIEW(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               *stolen = gen9_get_stolen_size(snb_gmch_ctl);
+               gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+       } else if (IS_CHERRYVIEW(dev)) {
                *stolen = chv_get_stolen_size(snb_gmch_ctl);
                gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
        } else {
@@ -2127,6 +2148,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
        vma->obj = obj;
 
        switch (INTEL_INFO(vm->dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
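
The sanitize_enable_ppgtt() rework at the top of this file's diff maps the i915.enable_ppgtt module parameter (0 = disabled, 1 = aliasing, 2 = full) onto what the hardware generation supports, with full PPGTT explicitly kept off on gen8 for this pull. A simplified sketch of that decision (pick_ppgtt is a hypothetical name; the IOMMU special case in the real code is omitted):

#include <stdio.h>

enum ppgtt_mode { PPGTT_NONE = 0, PPGTT_ALIASING = 1, PPGTT_FULL = 2 };

static enum ppgtt_mode pick_ppgtt(int gen, int enable_ppgtt)
{
	int has_aliasing_ppgtt = gen >= 6;
	int has_full_ppgtt = gen >= 7 && gen != 8;	/* gen8 full ppgtt kept disabled here */

	if (enable_ppgtt == 0 || !has_aliasing_ppgtt)
		return PPGTT_NONE;
	if (enable_ppgtt == 1)
		return PPGTT_ALIASING;
	if (enable_ppgtt == 2 && has_full_ppgtt)
		return PPGTT_FULL;

	/* default/fallback path */
	return has_aliasing_ppgtt ? PPGTT_ALIASING : PPGTT_NONE;
}

int main(void)
{
	printf("gen7 full: %d, gen8 full: %d, gen5: %d\n",
	       pick_ppgtt(7, 2), pick_ppgtt(8, 2), pick_ppgtt(5, 2));	/* 2, 1, 0 */
	return 0;
}
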
index 2c87a79..e664599 100644
@@ -765,6 +765,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
+       case 9:
        case 8:
        case 7:
        case 6:
@@ -923,6 +924,7 @@ static void i915_record_ring_state(struct drm_device *dev,
                ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
 
                switch (INTEL_INFO(dev)->gen) {
+               case 9:
                case 8:
                        for (i = 0; i < 4; i++) {
                                ering->vm_info.pdp[i] =
@@ -1326,13 +1328,12 @@ void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       spin_lock_irq(&dev_priv->gpu_error.lock);
        error_priv->error = dev_priv->gpu_error.first_error;
        if (error_priv->error)
                kref_get(&error_priv->error->ref);
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+       spin_unlock_irq(&dev_priv->gpu_error.lock);
 
 }
 
@@ -1346,12 +1347,11 @@ void i915_destroy_error_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       spin_lock_irq(&dev_priv->gpu_error.lock);
        error = dev_priv->gpu_error.first_error;
        dev_priv->gpu_error.first_error = NULL;
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+       spin_unlock_irq(&dev_priv->gpu_error.lock);
 
        if (error)
                kref_put(&error->ref, i915_error_state_free);
@@ -1389,6 +1389,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
                WARN_ONCE(1, "Unsupported platform\n");
        case 7:
        case 8:
+       case 9:
                instdone[0] = I915_READ(GEN7_INSTDONE_1);
                instdone[1] = I915_READ(GEN7_SC_INSTDONE);
                instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
index f66392b..f17bbf3 100644
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/**
+ * DOC: interrupt handling
+ *
+ * These functions provide the basic support for enabling and disabling the
+ * interrupt handling support. There's a lot more functionality in i915_irq.c
+ * and related files, but that will be described in separate chapters.
+ */
+
 static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@@ -310,9 +318,8 @@ void i9xx_check_fifo_underruns(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       spin_lock_irq(&dev_priv->irq_lock);
 
        for_each_intel_crtc(dev, crtc) {
                u32 reg = PIPESTAT(crtc->pipe);
@@ -331,7 +338,7 @@ void i9xx_check_fifo_underruns(struct drm_device *dev)
                DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
        }
 
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -503,7 +510,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
-       else if (IS_GEN8(dev))
+       else if (IS_GEN8(dev) || IS_GEN9(dev))
                broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
        return old;
@@ -589,6 +596,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
        assert_spin_locked(&dev_priv->irq_lock);
+       WARN_ON(!intel_irqs_enabled(dev_priv));
 
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -615,6 +623,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
        assert_spin_locked(&dev_priv->irq_lock);
+       WARN_ON(!intel_irqs_enabled(dev_priv));
 
        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -694,19 +703,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 static void i915_enable_asle_pipestat(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
 
        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
 
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);
 
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 /**
@@ -1094,18 +1102,17 @@ static void i915_digport_work_func(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, dig_port_work);
-       unsigned long irqflags;
        u32 long_port_mask, short_port_mask;
        struct intel_digital_port *intel_dig_port;
        int i, ret;
        u32 old_bits = 0;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->long_hpd_port_mask;
        dev_priv->long_hpd_port_mask = 0;
        short_port_mask = dev_priv->short_hpd_port_mask;
        dev_priv->short_hpd_port_mask = 0;
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        for (i = 0; i < I915_MAX_PORTS; i++) {
                bool valid = false;
@@ -1130,9 +1137,9 @@ static void i915_digport_work_func(struct work_struct *work)
        }
 
        if (old_bits) {
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hpd_event_bits |= old_bits;
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock_irq(&dev_priv->irq_lock);
                schedule_work(&dev_priv->hotplug_work);
        }
 }
@@ -1151,7 +1158,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
-       unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;
@@ -1159,7 +1165,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
 
        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
@@ -1193,7 +1199,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
                                 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }
 
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
@@ -1488,7 +1494,6 @@ static void ivybridge_parity_work(struct work_struct *work)
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
-       unsigned long flags;
        uint8_t slice = 0;
 
        /* We must turn off DOP level clock gating to access the L3 registers.
@@ -1547,9 +1552,9 @@ static void ivybridge_parity_work(struct work_struct *work)
 
 out:
        WARN_ON(dev_priv->l3_parity.which_slice);
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        mutex_unlock(&dev_priv->dev->struct_mutex);
 }
@@ -2566,7 +2571,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        }
 
        for_each_pipe(dev_priv, pipe) {
-               uint32_t pipe_iir;
+               uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
 
                if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
                        continue;
@@ -2575,11 +2580,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                if (pipe_iir) {
                        ret = IRQ_HANDLED;
                        I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+
                        if (pipe_iir & GEN8_PIPE_VBLANK &&
                            intel_pipe_handle_vblank(dev, pipe))
                                intel_check_page_flip(dev, pipe);
 
-                       if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+                       if (IS_GEN9(dev))
+                               flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
+                       else
+                               flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+
+                       if (flip_done) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip_plane(dev, pipe);
                        }
@@ -2594,11 +2605,16 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                                                  pipe_name(pipe));
                        }
 
-                       if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+
+                       if (IS_GEN9(dev))
+                               fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+                       else
+                               fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+                       if (fault_errors)
                                DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
                                          pipe_name(pipe),
                                          pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
-                       }
                } else
                        DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
        }
@@ -3444,8 +3460,8 @@ static void gen8_irq_reset(struct drm_device *dev)
        gen8_gt_irq_reset(dev_priv);
 
        for_each_pipe(dev_priv, pipe)
-               if (intel_display_power_enabled(dev_priv,
-                                               POWER_DOMAIN_PIPE(pipe)))
+               if (intel_display_power_is_enabled(dev_priv,
+                                                  POWER_DOMAIN_PIPE(pipe)))
                        GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
 
        GEN5_IRQ_RESET(GEN8_DE_PORT_);
@@ -3457,15 +3473,14 @@ static void gen8_irq_reset(struct drm_device *dev)
 
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
-       unsigned long irqflags;
        uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
                          ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
        GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
                          ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void cherryview_irq_preinstall(struct drm_device *dev)
@@ -3584,7 +3599,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
 {
-       unsigned long irqflags;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 display_mask, extra_mask;
 
@@ -3623,9 +3637,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
                 * spinlocking not required here for correctness since interrupt
                 * setup is guaranteed to run in single-threaded context. But we
                 * need it to make the assert_spin_locked happy. */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock_irq(&dev_priv->irq_lock);
                ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock_irq(&dev_priv->irq_lock);
        }
 
        return 0;
@@ -3701,7 +3715,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
 
        dev_priv->display_irqs_enabled = true;
 
-       if (dev_priv->dev->irq_enabled)
+       if (intel_irqs_enabled(dev_priv))
                valleyview_display_irqs_install(dev_priv);
 }
 
@@ -3714,14 +3728,13 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
 
        dev_priv->display_irqs_enabled = false;
 
-       if (dev_priv->dev->irq_enabled)
+       if (intel_irqs_enabled(dev_priv))
                valleyview_display_irqs_uninstall(dev_priv);
 }
 
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
 
        dev_priv->irq_mask = ~0;
 
@@ -3735,10 +3748,10 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display_irqs_enabled)
                valleyview_display_irqs_install(dev_priv);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3783,18 +3796,26 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 
 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
-       uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
-               GEN8_PIPE_CDCLK_CRC_DONE |
-               GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-       uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
-               GEN8_PIPE_FIFO_UNDERRUN;
+       uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+       uint32_t de_pipe_enables;
        int pipe;
+
+       if (IS_GEN9(dev_priv))
+               de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
+                                 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+       else
+               de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
+                                 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+
+       de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+                                          GEN8_PIPE_FIFO_UNDERRUN;
+
        dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
 
        for_each_pipe(dev_priv, pipe)
-               if (intel_display_power_enabled(dev_priv,
+               if (intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_PIPE(pipe)))
                        GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
                                          dev_priv->de_irq_mask[pipe],
@@ -3829,7 +3850,6 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
                I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
                PIPE_CRC_DONE_INTERRUPT_STATUS;
-       unsigned long irqflags;
        int pipe;
 
        /*
@@ -3841,11 +3861,11 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        for_each_pipe(dev_priv, pipe)
                i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
@@ -3872,7 +3892,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
        int pipe;
 
        if (!dev_priv)
@@ -3887,10 +3906,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       /* Interrupt setup is already guaranteed to be single-threaded, this is
+        * just to make the assert_spin_locked check happy. */
+       spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display_irqs_enabled)
                valleyview_display_irqs_uninstall(dev_priv);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        dev_priv->irq_mask = 0;
 
@@ -3976,7 +3997,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
 
        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3999,10 +4019,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        return 0;
 }
@@ -4047,7 +4067,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 iir, new_iir;
        u32 pipe_stats[2];
-       unsigned long irqflags;
        int pipe;
        u16 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4063,7 +4082,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false,
                                          "Command parser error, iir 0x%08x",
@@ -4079,7 +4098,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                        if (pipe_stats[pipe] & 0x8000ffff)
                                I915_WRITE(reg, pipe_stats[pipe]);
                }
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock(&dev_priv->irq_lock);
 
                I915_WRITE16(IIR, iir & ~flip_mask);
                new_iir = I915_READ16(IIR); /* Flush posted writes */
@@ -4149,7 +4168,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 enable_mask;
-       unsigned long irqflags;
 
        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
@@ -4187,10 +4205,10 @@ static int i915_irq_postinstall(struct drm_device *dev)
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        return 0;
 }
@@ -4234,7 +4252,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
-       unsigned long irqflags;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
@@ -4250,7 +4267,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false,
                                          "Command parser error, iir 0x%08x",
@@ -4266,7 +4283,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                                irq_received = true;
                        }
                }
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock(&dev_priv->irq_lock);
 
                if (!irq_received)
                        break;
@@ -4372,7 +4389,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 enable_mask;
        u32 error_mask;
-       unsigned long irqflags;
 
        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -4393,11 +4409,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        /*
         * Enable some error detection, note the instruction error mask
@@ -4462,7 +4478,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
-       unsigned long irqflags;
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4479,7 +4494,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false,
                                          "Command parser error, iir 0x%08x",
@@ -4497,7 +4512,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                                irq_received = true;
                        }
                }
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock(&dev_priv->irq_lock);
 
                if (!irq_received)
                        break;
@@ -4584,19 +4599,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
        I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable(struct work_struct *work)
+static void intel_hpd_irq_reenable_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug_reenable_work.work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
-       unsigned long irqflags;
        int i;
 
        intel_runtime_pm_get(dev_priv);
 
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
                struct drm_connector *connector;
 
@@ -4620,14 +4634,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work)
        }
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        intel_runtime_pm_put(dev_priv);
 }
 
-void intel_irq_init(struct drm_device *dev)
+/**
+ * intel_irq_init - initializes irq support
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes all the irq support including work items, timers
+ * and all the vtables. It does not set up the interrupt itself, though.
+ */
+void intel_irq_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
 
        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
@@ -4636,7 +4657,7 @@ void intel_irq_init(struct drm_device *dev)
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
        /* Let's track the enabled rps events */
-       if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+       if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                /* WaGsvRC0ResidencyMethod:vlv */
                dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
@@ -4646,17 +4667,14 @@ void intel_irq_init(struct drm_device *dev)
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);
        INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
-                         intel_hpd_irq_reenable);
+                         intel_hpd_irq_reenable_work);
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
-       /* Haven't installed the IRQ handler yet */
-       dev_priv->pm._irqs_disabled = true;
-
-       if (IS_GEN2(dev)) {
+       if (IS_GEN2(dev_priv)) {
                dev->max_vblank_count = 0;
                dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
-       } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        } else {
@@ -4669,7 +4687,7 @@ void intel_irq_init(struct drm_device *dev)
         * Gen2 doesn't have a hardware frame counter and so depends on
         * vblank interrupts to produce sane vblank sequence numbers.
         */
-       if (!IS_GEN2(dev))
+       if (!IS_GEN2(dev_priv))
                dev->vblank_disable_immediate = true;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -4677,7 +4695,7 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
        }
 
-       if (IS_CHERRYVIEW(dev)) {
+       if (IS_CHERRYVIEW(dev_priv)) {
                dev->driver->irq_handler = cherryview_irq_handler;
                dev->driver->irq_preinstall = cherryview_irq_preinstall;
                dev->driver->irq_postinstall = cherryview_irq_postinstall;
@@ -4685,7 +4703,7 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-       } else if (IS_VALLEYVIEW(dev)) {
+       } else if (IS_VALLEYVIEW(dev_priv)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_preinstall;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
@@ -4693,7 +4711,7 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-       } else if (IS_GEN8(dev)) {
+       } else if (INTEL_INFO(dev_priv)->gen >= 8) {
                dev->driver->irq_handler = gen8_irq_handler;
                dev->driver->irq_preinstall = gen8_irq_reset;
                dev->driver->irq_postinstall = gen8_irq_postinstall;
@@ -4710,12 +4728,12 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else {
-               if (INTEL_INFO(dev)->gen == 2) {
+               if (INTEL_INFO(dev_priv)->gen == 2) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
-               } else if (INTEL_INFO(dev)->gen == 3) {
+               } else if (INTEL_INFO(dev_priv)->gen == 3) {
                        dev->driver->irq_preinstall = i915_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4733,12 +4751,23 @@ void intel_irq_init(struct drm_device *dev)
        }
 }
 
-void intel_hpd_init(struct drm_device *dev)
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
+ * poll requests can run concurrently with other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
-       unsigned long irqflags;
        int i;
 
        for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -4756,27 +4785,72 @@ void intel_hpd_init(struct drm_device *dev)
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy. */
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-/* Disable interrupts so we can allow runtime PM. */
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_install - enables the hardware interrupt
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hardware interrupt handling, but leaves the hotplug
+ * handling still disabled. It is called after intel_irq_init().
+ *
+ * In the driver load and resume code we need working interrupts in a few places
+ * but don't want to deal with the hassle of concurrent probe and hotplug
+ * workers. Hence the split into this two-stage approach.
+ */
+int intel_irq_install(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       /*
+        * We enable some interrupt sources in our postinstall hooks, so mark
+        * interrupts as enabled _before_ actually enabling them to avoid
+        * special cases in our ordering checks.
+        */
+       dev_priv->pm.irqs_enabled = true;
 
-       dev->driver->irq_uninstall(dev);
-       dev_priv->pm._irqs_disabled = true;
+       return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
 }
 
-/* Restore interrupts so we can recover from runtime PM. */
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
+/**
+ * intel_irq_uninstall - finalizes all irq handling
+ * @dev_priv: i915 device instance
+ *
+ * This stops interrupt and hotplug handling and unregisters and frees all
+ * resources acquired in the init functions.
+ */
+void intel_irq_uninstall(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       drm_irq_uninstall(dev_priv->dev);
+       intel_hpd_cancel_work(dev_priv);
+       dev_priv->pm.irqs_enabled = false;
+}
+
+/**
+ * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to disable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+{
+       dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+       dev_priv->pm.irqs_enabled = false;
+}
 
-       dev_priv->pm._irqs_disabled = false;
-       dev->driver->irq_preinstall(dev);
-       dev->driver->irq_postinstall(dev);
+/**
+ * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
+ * @dev_priv: i915 device instance
+ *
+ * This function is used to enable interrupts at runtime, both in the runtime
+ * pm and the system suspend/resume code.
+ */
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
+{
+       dev_priv->pm.irqs_enabled = true;
+       dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
+       dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
 }
index c01e5f3..a56d9a7 100644 (file)
@@ -26,8 +26,8 @@
 #define _I915_REG_H_
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PLANE(plane, a, b) _PIPE(plane, a, b)
 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
-
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
 #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
                               (pipe) == PIPE_B ? (b) : (c))
@@ -796,6 +796,8 @@ enum punit_power_well {
 #define _VLV_PCS_DW0_CH1               0x8400
 #define   DPIO_PCS_TX_LANE2_RESET      (1<<16)
 #define   DPIO_PCS_TX_LANE1_RESET      (1<<7)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER2        (1<<3)
 #define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
 #define _VLV_PCS01_DW0_CH0             0x200
@@ -836,12 +838,31 @@ enum punit_power_well {
 
 #define _VLV_PCS_DW9_CH0               0x8224
 #define _VLV_PCS_DW9_CH1               0x8424
+#define   DPIO_PCS_TX2MARGIN_MASK      (0x7<<13)
+#define   DPIO_PCS_TX2MARGIN_000       (0<<13)
+#define   DPIO_PCS_TX2MARGIN_101       (1<<13)
+#define   DPIO_PCS_TX1MARGIN_MASK      (0x7<<10)
+#define   DPIO_PCS_TX1MARGIN_000       (0<<10)
+#define   DPIO_PCS_TX1MARGIN_101       (1<<10)
 #define        VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
 
+#define _VLV_PCS01_DW9_CH0             0x224
+#define _VLV_PCS23_DW9_CH0             0x424
+#define _VLV_PCS01_DW9_CH1             0x2624
+#define _VLV_PCS23_DW9_CH1             0x2824
+#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
+#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
+
 #define _CHV_PCS_DW10_CH0              0x8228
 #define _CHV_PCS_DW10_CH1              0x8428
 #define   DPIO_PCS_SWING_CALC_TX0_TX2  (1<<30)
 #define   DPIO_PCS_SWING_CALC_TX1_TX3  (1<<31)
+#define   DPIO_PCS_TX2DEEMP_MASK       (0xf<<24)
+#define   DPIO_PCS_TX2DEEMP_9P5                (0<<24)
+#define   DPIO_PCS_TX2DEEMP_6P0                (2<<24)
+#define   DPIO_PCS_TX1DEEMP_MASK       (0xf<<16)
+#define   DPIO_PCS_TX1DEEMP_9P5                (0<<16)
+#define   DPIO_PCS_TX1DEEMP_6P0                (2<<16)
 #define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
 
 #define _VLV_PCS01_DW10_CH0            0x0228
@@ -853,8 +874,18 @@ enum punit_power_well {
 
 #define _VLV_PCS_DW11_CH0              0x822c
 #define _VLV_PCS_DW11_CH1              0x842c
+#define   DPIO_LANEDESKEW_STRAP_OVRD   (1<<3)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER  (1<<1)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
 #define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
 
+#define _VLV_PCS01_DW11_CH0            0x022c
+#define _VLV_PCS23_DW11_CH0            0x042c
+#define _VLV_PCS01_DW11_CH1            0x262c
+#define _VLV_PCS23_DW11_CH1            0x282c
+#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
+#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
+
 #define _VLV_PCS_DW12_CH0              0x8230
 #define _VLV_PCS_DW12_CH1              0x8430
 #define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
@@ -2506,9 +2537,7 @@ enum punit_power_well {
 
 #define EDP_PSR_AUX_CTL(dev)                   (EDP_PSR_BASE(dev) + 0x10)
 #define EDP_PSR_AUX_DATA1(dev)                 (EDP_PSR_BASE(dev) + 0x14)
-#define   EDP_PSR_DPCD_COMMAND         0x80060000
 #define EDP_PSR_AUX_DATA2(dev)                 (EDP_PSR_BASE(dev) + 0x18)
-#define   EDP_PSR_DPCD_NORMAL_OPERATION        (1<<24)
 #define EDP_PSR_AUX_DATA3(dev)                 (EDP_PSR_BASE(dev) + 0x1c)
 #define EDP_PSR_AUX_DATA4(dev)                 (EDP_PSR_BASE(dev) + 0x20)
 #define EDP_PSR_AUX_DATA5(dev)                 (EDP_PSR_BASE(dev) + 0x24)
@@ -3645,6 +3674,7 @@ enum punit_power_well {
 #define   DP_AUX_CH_CTL_PRECHARGE_TEST     (1 << 11)
 #define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff)
 #define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0
+#define   DP_AUX_CH_CTL_SYNC_PULSE_SKL(c)   ((c) - 1)
 
 /*
  * Computing GMCH M and N values for the Display Port link
@@ -4510,6 +4540,143 @@ enum punit_power_well {
 #define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
 #define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
 
+/* Skylake plane registers */
+
+#define _PLANE_CTL_1_A                         0x70180
+#define _PLANE_CTL_2_A                         0x70280
+#define _PLANE_CTL_3_A                         0x70380
+#define   PLANE_CTL_ENABLE                     (1 << 31)
+#define   PLANE_CTL_PIPE_GAMMA_ENABLE          (1 << 30)
+#define   PLANE_CTL_FORMAT_MASK                        (0xf << 24)
+#define   PLANE_CTL_FORMAT_YUV422              (  0 << 24)
+#define   PLANE_CTL_FORMAT_NV12                        (  1 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_2101010                (  2 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_8888           (  4 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_16161616F      (  6 << 24)
+#define   PLANE_CTL_FORMAT_AYUV                        (  8 << 24)
+#define   PLANE_CTL_FORMAT_INDEXED             ( 12 << 24)
+#define   PLANE_CTL_FORMAT_RGB_565             ( 14 << 24)
+#define   PLANE_CTL_PIPE_CSC_ENABLE            (1 << 23)
+#define   PLANE_CTL_KEY_ENABLE_MASK            (0x3 << 21)
+#define   PLANE_CTL_KEY_ENABLE_SOURCE          (  1 << 21)
+#define   PLANE_CTL_KEY_ENABLE_DESTINATION     (  2 << 21)
+#define   PLANE_CTL_ORDER_BGRX                 (0 << 20)
+#define   PLANE_CTL_ORDER_RGBX                 (1 << 20)
+#define   PLANE_CTL_YUV422_ORDER_MASK          (0x3 << 16)
+#define   PLANE_CTL_YUV422_YUYV                        (  0 << 16)
+#define   PLANE_CTL_YUV422_UYVY                        (  1 << 16)
+#define   PLANE_CTL_YUV422_YVYU                        (  2 << 16)
+#define   PLANE_CTL_YUV422_VYUY                        (  3 << 16)
+#define   PLANE_CTL_DECOMPRESSION_ENABLE       (1 << 15)
+#define   PLANE_CTL_TRICKLE_FEED_DISABLE       (1 << 14)
+#define   PLANE_CTL_PLANE_GAMMA_DISABLE                (1 << 13)
+#define   PLANE_CTL_TILED_MASK                 (0x7 << 10)
+#define   PLANE_CTL_TILED_LINEAR               (  0 << 10)
+#define   PLANE_CTL_TILED_X                    (  1 << 10)
+#define   PLANE_CTL_TILED_Y                    (  4 << 10)
+#define   PLANE_CTL_TILED_YF                   (  5 << 10)
+#define   PLANE_CTL_ALPHA_MASK                 (0x3 << 4)
+#define   PLANE_CTL_ALPHA_DISABLE              (  0 << 4)
+#define   PLANE_CTL_ALPHA_SW_PREMULTIPLY       (  2 << 4)
+#define   PLANE_CTL_ALPHA_HW_PREMULTIPLY       (  3 << 4)
+#define _PLANE_STRIDE_1_A                      0x70188
+#define _PLANE_STRIDE_2_A                      0x70288
+#define _PLANE_STRIDE_3_A                      0x70388
+#define _PLANE_POS_1_A                         0x7018c
+#define _PLANE_POS_2_A                         0x7028c
+#define _PLANE_POS_3_A                         0x7038c
+#define _PLANE_SIZE_1_A                                0x70190
+#define _PLANE_SIZE_2_A                                0x70290
+#define _PLANE_SIZE_3_A                                0x70390
+#define _PLANE_SURF_1_A                                0x7019c
+#define _PLANE_SURF_2_A                                0x7029c
+#define _PLANE_SURF_3_A                                0x7039c
+#define _PLANE_OFFSET_1_A                      0x701a4
+#define _PLANE_OFFSET_2_A                      0x702a4
+#define _PLANE_OFFSET_3_A                      0x703a4
+#define _PLANE_KEYVAL_1_A                      0x70194
+#define _PLANE_KEYVAL_2_A                      0x70294
+#define _PLANE_KEYMSK_1_A                      0x70198
+#define _PLANE_KEYMSK_2_A                      0x70298
+#define _PLANE_KEYMAX_1_A                      0x701a0
+#define _PLANE_KEYMAX_2_A                      0x702a0
+
+#define _PLANE_CTL_1_B                         0x71180
+#define _PLANE_CTL_2_B                         0x71280
+#define _PLANE_CTL_3_B                         0x71380
+#define _PLANE_CTL_1(pipe)     _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
+#define _PLANE_CTL_2(pipe)     _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
+#define _PLANE_CTL_3(pipe)     _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
+#define PLANE_CTL(pipe, plane) \
+       _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
+
+#define _PLANE_STRIDE_1_B                      0x71188
+#define _PLANE_STRIDE_2_B                      0x71288
+#define _PLANE_STRIDE_3_B                      0x71388
+#define _PLANE_STRIDE_1(pipe)  \
+       _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
+#define _PLANE_STRIDE_2(pipe)  \
+       _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
+#define _PLANE_STRIDE_3(pipe)  \
+       _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
+#define PLANE_STRIDE(pipe, plane)      \
+       _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
+
+#define _PLANE_POS_1_B                         0x7118c
+#define _PLANE_POS_2_B                         0x7128c
+#define _PLANE_POS_3_B                         0x7138c
+#define _PLANE_POS_1(pipe)     _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
+#define _PLANE_POS_2(pipe)     _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
+#define _PLANE_POS_3(pipe)     _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
+#define PLANE_POS(pipe, plane) \
+       _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
+
+#define _PLANE_SIZE_1_B                                0x71190
+#define _PLANE_SIZE_2_B                                0x71290
+#define _PLANE_SIZE_3_B                                0x71390
+#define _PLANE_SIZE_1(pipe)    _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
+#define _PLANE_SIZE_2(pipe)    _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
+#define _PLANE_SIZE_3(pipe)    _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
+#define PLANE_SIZE(pipe, plane)        \
+       _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
+
+#define _PLANE_SURF_1_B                                0x7119c
+#define _PLANE_SURF_2_B                                0x7129c
+#define _PLANE_SURF_3_B                                0x7139c
+#define _PLANE_SURF_1(pipe)    _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
+#define _PLANE_SURF_2(pipe)    _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
+#define _PLANE_SURF_3(pipe)    _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
+#define PLANE_SURF(pipe, plane)        \
+       _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+
+#define _PLANE_OFFSET_1_B                      0x711a4
+#define _PLANE_OFFSET_2_B                      0x712a4
+#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
+#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
+#define PLANE_OFFSET(pipe, plane)      \
+       _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
+
+#define _PLANE_KEYVAL_1_B                      0x71194
+#define _PLANE_KEYVAL_2_B                      0x71294
+#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
+#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
+#define PLANE_KEYVAL(pipe, plane)      \
+       _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
+
+#define _PLANE_KEYMSK_1_B                      0x71198
+#define _PLANE_KEYMSK_2_B                      0x71298
+#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
+#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
+#define PLANE_KEYMSK(pipe, plane)      \
+       _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
+
+#define _PLANE_KEYMAX_1_B                      0x711a0
+#define _PLANE_KEYMAX_2_B                      0x712a0
+#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
+#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
+#define PLANE_KEYMAX(pipe, plane)      \
+       _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
+
 /* VBIOS regs */
 #define VGACNTRL               0x71400
 # define VGA_DISP_DISABLE                      (1 << 31)
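
The per-pipe, per-plane register lookup above bottoms out in the linear extrapolation done by _PIPE()/_PLANE(), i.e. reg(n) = a + n * (b - a). A worked example using the addresses defined above (purely illustrative):

/*
 * PLANE_CTL(pipe, plane) = _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
 *
 * PLANE_CTL(PIPE_B, 1):
 *   _PLANE_CTL_1(PIPE_B) = 0x70180 + 1 * (0x71180 - 0x70180) = 0x71180
 *   _PLANE_CTL_2(PIPE_B) = 0x70280 + 1 * (0x71280 - 0x70280) = 0x71280
 *   PLANE_CTL(PIPE_B, 1) = 0x71180 + 1 * (0x71280 - 0x71180) = 0x71280
 *
 * i.e. the control register of the second plane on pipe B. Extrapolating from
 * instances 1 and 2 also lands on _PLANE_CTL_3_B (0x71380) for plane index 2,
 * because the hardware spaces the plane instances uniformly.
 */
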
@@ -4746,10 +4913,23 @@ enum punit_power_well {
 #define  GEN8_PIPE_SCAN_LINE_EVENT     (1 << 2)
 #define  GEN8_PIPE_VSYNC               (1 << 1)
 #define  GEN8_PIPE_VBLANK              (1 << 0)
+#define  GEN9_PIPE_CURSOR_FAULT                (1 << 11)
+#define  GEN9_PIPE_PLANE3_FAULT                (1 << 9)
+#define  GEN9_PIPE_PLANE2_FAULT                (1 << 8)
+#define  GEN9_PIPE_PLANE1_FAULT                (1 << 7)
+#define  GEN9_PIPE_PLANE3_FLIP_DONE    (1 << 5)
+#define  GEN9_PIPE_PLANE2_FLIP_DONE    (1 << 4)
+#define  GEN9_PIPE_PLANE1_FLIP_DONE    (1 << 3)
+#define  GEN9_PIPE_PLANE_FLIP_DONE(p)  (1 << (3 + (p)))
 #define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
        (GEN8_PIPE_CURSOR_FAULT | \
         GEN8_PIPE_SPRITE_FAULT | \
         GEN8_PIPE_PRIMARY_FAULT)
+#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
+       (GEN9_PIPE_CURSOR_FAULT | \
+        GEN9_PIPE_PLANE3_FAULT | \
+        GEN9_PIPE_PLANE2_FAULT | \
+        GEN9_PIPE_PLANE1_FAULT)
 
 #define GEN8_DE_PORT_ISR 0x44440
 #define GEN8_DE_PORT_IMR 0x44444
@@ -4839,6 +5019,7 @@ enum punit_power_well {
 /* GEN8 chicken */
 #define HDC_CHICKEN0                           0x7300
 #define  HDC_FORCE_NON_COHERENT                        (1<<4)
+#define  HDC_FENCE_DEST_SLM_DISABLE            (1<<14)
 
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         0x9030
@@ -5751,6 +5932,9 @@ enum punit_power_well {
 #define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE  (1<<10)
 #define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
 
+#define GEN9_HALF_SLICE_CHICKEN5       0xe188
+#define   GEN9_DG_MIRROR_FIX_ENABLE    (1<<5)
+
 #define GEN8_ROW_CHICKEN               0xe4f0
 #define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE        (1<<8)
 #define   STALL_DOP_GATING_DISABLE             (1<<5)
index 905999b..7603765 100644 (file)
@@ -46,7 +46,7 @@ struct bdb_header {
        u16 version;                    /**< decimal */
        u16 header_size;                /**< in bytes */
        u16 bdb_size;                   /**< in bytes */
-};
+} __packed;
 
 /* strictly speaking, this is a "skip" block, but it has interesting info */
 struct vbios_data {
@@ -252,7 +252,7 @@ union child_device_config {
        /* This one should also be safe to use anywhere, even without version
         * checks. */
        struct common_child_dev_config common;
-};
+} __packed;
 
 struct bdb_general_definitions {
        /* DDC GPIO */
@@ -888,12 +888,12 @@ struct mipi_pps_data {
        u16 bl_disable_delay;
        u16 panel_off_delay;
        u16 panel_power_cycle_delay;
-};
+} __packed;
 
 struct bdb_mipi_config {
        struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
        struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-};
+} __packed;
 
 /* Block 53 contains MIPI sequences as needed by the panel
  * for enabling it. This block can be variable in size and
@@ -902,7 +902,7 @@ struct bdb_mipi_config {
 struct bdb_mipi_sequence {
        u8 version;
        u8 data[0];
-};
+} __packed;
 
 /* MIPI Sequence Block definitions */
 enum mipi_seq {
index 9212e65..dacaad5 100644 (file)
@@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
        u32 tmp;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(crt->adpa_reg);
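
The rename to intel_display_power_is_enabled() (applied throughout the hunks below as well) emphasises that this is a pure predicate which takes no power reference, so every ->get_hw_state() style hook keeps the same guard-then-read shape. A minimal sketch of that pattern; the reg/enable_bit parameters are hypothetical and only stand in for whatever register the real hook decodes:

static bool example_get_hw_state(struct intel_encoder *encoder,
                                 u32 reg, u32 enable_bit)
{
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        enum intel_display_power_domain power_domain;

        /* If the power well is off, its registers must not be touched;
         * report the output as disabled instead of reading garbage. */
        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        return (I915_READ(reg) & enable_bit) != 0;
}
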
index b63d4fa..a151de7 100644 (file)
@@ -95,8 +95,8 @@ static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
        { 0x00BEFFFF, 0x00140006 },
        { 0x80B2CFFF, 0x001B0002 },
        { 0x00FFFFFF, 0x000E000A },
-       { 0x00D75FFF, 0x00180004 },
-       { 0x80CB2FFF, 0x001B0002 },
+       { 0x00DB6FFF, 0x00160005 },
+       { 0x80C71FFF, 0x001A0002 },
        { 0x00F7DFFF, 0x00180004 },
        { 0x80D75FFF, 0x001B0002 },
 };
@@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
        { 0x80FFFFFF, 0x001B0002 },     /* 9:   1000    1000    0       */
 };
 
+static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+       { 0x00000018, 0x000000a0 },
+       { 0x00004014, 0x00000098 },
+       { 0x00006012, 0x00000088 },
+       { 0x00008010, 0x00000080 },
+       { 0x00000018, 0x00000098 },
+       { 0x00004014, 0x00000088 },
+       { 0x00006012, 0x00000080 },
+       { 0x00000018, 0x00000088 },
+       { 0x00004014, 0x00000080 },
+};
+
+static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+                                       /* Idx  NT mV   T mV    db  */
+       { 0x00000018, 0x000000a0 },     /* 0:   400     400     0   */
+       { 0x00004014, 0x00000098 },     /* 1:   400     600     3.5 */
+       { 0x00006012, 0x00000088 },     /* 2:   400     800     6   */
+       { 0x00000018, 0x0000003c },     /* 3:   450     450     0   */
+       { 0x00000018, 0x00000098 },     /* 4:   600     600     0   */
+       { 0x00003015, 0x00000088 },     /* 5:   600     800     2.5 */
+       { 0x00005013, 0x00000080 },     /* 6:   600     1000    4.5 */
+       { 0x00000018, 0x00000088 },     /* 7:   800     800     0   */
+       { 0x00000096, 0x00000080 },     /* 8:   800     1000    2   */
+       { 0x00000018, 0x00000080 },     /* 9:   1200    1200    0   */
+};
+
 enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
@@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
        const struct ddi_buf_trans *ddi_translations_hdmi;
        const struct ddi_buf_trans *ddi_translations;
 
-       if (IS_BROADWELL(dev)) {
+       if (IS_SKYLAKE(dev)) {
+               ddi_translations_fdi = NULL;
+               ddi_translations_dp = skl_ddi_translations_dp;
+               ddi_translations_edp = skl_ddi_translations_dp;
+               ddi_translations_hdmi = skl_ddi_translations_hdmi;
+               n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+               hdmi_800mV_0dB = 7;
+       } else if (IS_BROADWELL(dev)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
                ddi_translations_edp = bdw_ddi_translations_edp;
@@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
                        ddi_translations = ddi_translations_dp;
                break;
        case PORT_E:
-               ddi_translations = ddi_translations_fdi;
+               if (ddi_translations_fdi)
+                       ddi_translations = ddi_translations_fdi;
+               else
+                       ddi_translations = ddi_translations_dp;
                break;
        default:
                BUG();
@@ -962,7 +998,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
        uint32_t tmp;
 
        power_domain = intel_display_port_power_domain(intel_encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
@@ -1008,7 +1044,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
        int i;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(DDI_BUF_CTL(port));
@@ -1296,7 +1332,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 {
        uint32_t val;
 
-       if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
        val = I915_READ(WRPLL_CTL(pll->id));
@@ -1486,7 +1522,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                break;
        }
 
-       if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+       if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
                temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
                if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
                        pipe_config->has_audio = true;
index c9e2209..1fc05ff 100644 (file)
@@ -73,8 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
 };
 
-static void intel_increase_pllclock(struct drm_device *dev,
-                                   enum pipe pipe);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -889,60 +887,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
        return intel_crtc->config.cpu_transcoder;
 }
 
-static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
-
-       frame = I915_READ(frame_reg);
-
-       if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
-               WARN(1, "vblank wait on pipe %c timed out\n",
-                    pipe_name(pipe));
-}
-
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe.  Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipestat_reg = PIPESTAT(pipe);
-
-       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-               g4x_wait_for_vblank(dev, pipe);
-               return;
-       }
-
-       /* Clear existing vblank status. Note this will clear any other
-        * sticky status fields as well.
-        *
-        * This races with i915_driver_irq_handler() with the result
-        * that either function could miss a vblank event.  Here it is not
-        * fatal, as we will either wait upon the next vblank interrupt or
-        * timeout.  Generally speaking intel_wait_for_vblank() is only
-        * called during modeset at which time the GPU should be idle and
-        * should *not* be performing page flips and thus not waiting on
-        * vblanks...
-        * Currently, the result of us stealing a vblank from the irq
-        * handler is that a single frame will be skipped during swapbuffers.
-        */
-       I915_WRITE(pipestat_reg,
-                  I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
-
-       /* Wait for vblank interrupt bit to set */
-       if (wait_for(I915_READ(pipestat_reg) &
-                    PIPE_VBLANK_INTERRUPT_STATUS,
-                    50))
-               DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
-                             pipe_name(pipe));
-}
-
 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1189,8 +1133,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
             state_string(state), state_string(cur_state));
 }
 
-static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
-                                 enum pipe pipe)
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+                          enum pipe pipe)
 {
        struct drm_device *dev = dev_priv->dev;
        int pp_reg;
@@ -1263,7 +1207,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                state = true;
 
-       if (!intel_display_power_enabled(dev_priv,
+       if (!intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
                cur_state = false;
        } else {
@@ -1332,7 +1276,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
        int reg, sprite;
        u32 val;
 
-       if (IS_VALLEYVIEW(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               for_each_sprite(pipe, sprite) {
+                       val = I915_READ(PLANE_CTL(pipe, sprite));
+                       WARN(val & PLANE_CTL_ENABLE,
+                            "plane %d assertion failure, should be off on pipe %c but is still active\n",
+                            sprite, pipe_name(pipe));
+               }
+       } else if (IS_VALLEYVIEW(dev)) {
                for_each_sprite(pipe, sprite) {
                        reg = SPCNTR(pipe, sprite);
                        val = I915_READ(reg);
@@ -2233,7 +2184,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 
        switch (obj->tiling_mode) {
        case I915_TILING_NONE:
-               if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+               if (INTEL_INFO(dev)->gen >= 9)
+                       alignment = 256 * 1024;
+               else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                        alignment = 128 * 1024;
                else if (INTEL_INFO(dev)->gen >= 4)
                        alignment = 4 * 1024;
@@ -2241,8 +2194,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
                        alignment = 64 * 1024;
                break;
        case I915_TILING_X:
-               /* pin() will align the object as required by fence */
-               alignment = 0;
+               if (INTEL_INFO(dev)->gen >= 9)
+                       alignment = 256 * 1024;
+               else {
+                       /* pin() will align the object as required by fence */
+                       alignment = 0;
+               }
                break;
        case I915_TILING_Y:
                WARN(1, "Y tiled bo slipped through, driver bug!\n");
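
For reference, the alignment selection in this hunk can be restated as a single helper; this is only an illustrative summary of the switch above, not code from the patch:

static u32 example_fb_alignment(struct drm_device *dev, unsigned int tiling_mode)
{
        switch (tiling_mode) {
        case I915_TILING_NONE:
                if (INTEL_INFO(dev)->gen >= 9)
                        return 256 * 1024;
                if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                        return 128 * 1024;
                return INTEL_INFO(dev)->gen >= 4 ? 4 * 1024 : 64 * 1024;
        case I915_TILING_X:
                /* Gen9+ wants an explicit 256KiB alignment; older gens let
                 * pin() align the object as the fence requires. */
                return INTEL_INFO(dev)->gen >= 9 ? 256 * 1024 : 0;
        default:
                return 0; /* Y tiling is rejected by the caller */
        }
}
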
@@ -2672,6 +2629,90 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
        POSTING_READ(reg);
 }
 
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_framebuffer *intel_fb;
+       struct drm_i915_gem_object *obj;
+       int pipe = intel_crtc->pipe;
+       u32 plane_ctl, stride;
+
+       if (!intel_crtc->primary_enabled) {
+               I915_WRITE(PLANE_CTL(pipe, 0), 0);
+               I915_WRITE(PLANE_SURF(pipe, 0), 0);
+               POSTING_READ(PLANE_CTL(pipe, 0));
+               return;
+       }
+
+       plane_ctl = PLANE_CTL_ENABLE |
+                   PLANE_CTL_PIPE_GAMMA_ENABLE |
+                   PLANE_CTL_PIPE_CSC_ENABLE;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_RGB565:
+               plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+               plane_ctl |= PLANE_CTL_ORDER_RGBX;
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+               break;
+       default:
+               BUG();
+       }
+
+       intel_fb = to_intel_framebuffer(fb);
+       obj = intel_fb->obj;
+
+       /*
+        * The stride is expressed either in 64-byte chunks for linear buffers
+        * or in number of tiles for tiled buffers.
+        */
+       switch (obj->tiling_mode) {
+       case I915_TILING_NONE:
+               stride = fb->pitches[0] >> 6;
+               break;
+       case I915_TILING_X:
+               plane_ctl |= PLANE_CTL_TILED_X;
+               stride = fb->pitches[0] >> 9;
+               break;
+       default:
+               BUG();
+       }
+
+       plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+       I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+
+       DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
+                     i915_gem_obj_ggtt_offset(obj),
+                     x, y, fb->width, fb->height,
+                     fb->pitches[0]);
+
+       I915_WRITE(PLANE_POS(pipe, 0), 0);
+       I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
+       I915_WRITE(PLANE_SIZE(pipe, 0),
+                  (intel_crtc->config.pipe_src_h - 1) << 16 |
+                  (intel_crtc->config.pipe_src_w - 1));
+       I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+       I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
+
+       POSTING_READ(PLANE_SURF(pipe, 0));
+}
+
 /* Assume fb object is pinned & idle & fenced and just update base pointers */
 static int
 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
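
The PLANE_STRIDE programming in skylake_update_primary_plane() converts the framebuffer pitch from bytes into hardware units: 64-byte chunks for linear surfaces and whole tiles (an X tile is 512 bytes wide) for X-tiled ones, hence the >> 6 and >> 9. A small worked example, assuming a 1920x1080 XRGB8888 framebuffer:

/*
 * pitch = 1920 pixels * 4 bytes/pixel = 7680 bytes per scanline
 *
 * linear:  PLANE_STRIDE = 7680 >> 6 = 120   (64-byte units)
 * X-tiled: PLANE_STRIDE = 7680 >> 9 =  15   (512-byte-wide tiles)
 */
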
@@ -2682,7 +2723,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        if (dev_priv->display.disable_fbc)
                dev_priv->display.disable_fbc(dev);
-       intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
 
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -2762,20 +2802,58 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long flags;
        bool pending;
 
        if (i915_reset_in_progress(&dev_priv->gpu_error) ||
            intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
                return false;
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        pending = to_intel_crtc(crtc)->unpin_work != NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        return pending;
 }
 
+static void intel_update_pipe_size(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct drm_display_mode *adjusted_mode;
+
+       if (!i915.fastboot)
+               return;
+
+       /*
+        * Update pipe size and adjust fitter if needed: the reason for this is
+        * that in compute_mode_changes we check the native mode (not the pfit
+        * mode) to see if we can flip rather than do a full mode set. In the
+        * fastboot case, we'll flip, but if we don't update the pipesrc and
+        * pfit state, we'll end up with a big fb scanned out into the wrong
+        * sized surface.
+        *
+        * To fix this properly, we need to hoist the checks up into
+        * compute_mode_changes (or above), check the actual pfit state and
+        * whether the platform allows pfit disable with pipe active, and only
+        * then update the pipesrc and pfit state, even on the flip path.
+        */
+
+       adjusted_mode = &crtc->config.adjusted_mode;
+
+       I915_WRITE(PIPESRC(crtc->pipe),
+                  ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+                  (adjusted_mode->crtc_vdisplay - 1));
+       if (!crtc->config.pch_pfit.enabled &&
+           (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) ||
+            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))) {
+               I915_WRITE(PF_CTL(crtc->pipe), 0);
+               I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
+               I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
+       }
+       crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
+       crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
+}
+
 static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *fb)
@@ -2818,36 +2896,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
-       /*
-        * Update pipe size and adjust fitter if needed: the reason for this is
-        * that in compute_mode_changes we check the native mode (not the pfit
-        * mode) to see if we can flip rather than do a full mode set. In the
-        * fastboot case, we'll flip, but if we don't update the pipesrc and
-        * pfit state, we'll end up with a big fb scanned out into the wrong
-        * sized surface.
-        *
-        * To fix this properly, we need to hoist the checks up into
-        * compute_mode_changes (or above), check the actual pfit state and
-        * whether the platform allows pfit disable with pipe active, and only
-        * then update the pipesrc and pfit state, even on the flip path.
-        */
-       if (i915.fastboot) {
-               const struct drm_display_mode *adjusted_mode =
-                       &intel_crtc->config.adjusted_mode;
-
-               I915_WRITE(PIPESRC(intel_crtc->pipe),
-                          ((adjusted_mode->crtc_hdisplay - 1) << 16) |
-                          (adjusted_mode->crtc_vdisplay - 1));
-               if (!intel_crtc->config.pch_pfit.enabled &&
-                   (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
-                    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
-                       I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
-                       I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
-                       I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
-               }
-               intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
-               intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
-       }
+       intel_update_pipe_size(intel_crtc);
 
        dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -3472,14 +3521,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
                                       !intel_crtc_has_pending_flip(crtc),
                                       60*HZ) == 0)) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               unsigned long flags;
 
-               spin_lock_irqsave(&dev->event_lock, flags);
+               spin_lock_irq(&dev->event_lock);
                if (intel_crtc->unpin_work) {
                        WARN_ONCE(1, "Removing stuck page flip\n");
                        page_flip_completed(intel_crtc);
                }
-               spin_unlock_irqrestore(&dev->event_lock, flags);
+               spin_unlock_irq(&dev->event_lock);
        }
 
        if (crtc->primary->fb) {
@@ -4038,10 +4086,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 
-       assert_vblank_disabled(crtc);
-
-       drm_vblank_on(dev, pipe);
-
        intel_enable_primary_hw_plane(crtc->primary, crtc);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
@@ -4087,10 +4131,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
         * consider this a flip to a NULL plane.
         */
        intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
-
-       drm_vblank_off(dev, pipe);
-
-       assert_vblank_disabled(crtc);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4160,6 +4200,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        if (HAS_PCH_CPT(dev))
                cpt_verify_modeset(dev, intel_crtc->pipe);
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        intel_crtc_enable_planes(crtc);
 }
 
@@ -4272,6 +4315,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                intel_opregion_notify_encoder(encoder, true);
        }
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        haswell_mode_set_planes_workaround(intel_crtc);
@@ -4307,6 +4353,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
        intel_crtc_disable_planes(crtc);
 
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->disable(encoder);
 
@@ -4369,6 +4418,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
        intel_crtc_disable_planes(crtc);
 
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                intel_opregion_notify_encoder(encoder, false);
                encoder->disable(encoder);
@@ -4510,20 +4562,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
        return mask;
 }
 
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
-                                 bool enable)
-{
-       if (dev_priv->power_domains.init_power_on == enable)
-               return;
-
-       if (enable)
-               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-       else
-               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-       dev_priv->power_domains.init_power_on = enable;
-}
-
 static void modeset_update_crtc_power_domains(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4835,6 +4873,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        intel_crtc_enable_planes(crtc);
 
        /* Underruns don't raise interrupts, so check manually. */
@@ -4892,6 +4933,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
 
+       assert_vblank_disabled(crtc);
+       drm_crtc_vblank_on(crtc);
+
        intel_crtc_enable_planes(crtc);
 
        /*
@@ -4955,9 +4999,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        intel_set_memory_cxsr(dev_priv, false);
        intel_crtc_disable_planes(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->disable(encoder);
-
        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
@@ -4966,6 +5007,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
         */
        intel_wait_for_vblank(dev, pipe);
 
+       drm_crtc_vblank_off(crtc);
+       assert_vblank_disabled(crtc);
+
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               encoder->disable(encoder);
+
        intel_disable_pipe(intel_crtc);
 
        i9xx_pfit_disable(intel_crtc);
@@ -6434,8 +6481,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       if (!intel_display_power_enabled(dev_priv,
-                                        POWER_DOMAIN_PIPE(crtc->pipe)))
+       if (!intel_display_power_is_enabled(dev_priv,
+                                           POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7021,7 +7068,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
        I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
        POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
 
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                val = 0;
 
                switch (intel_crtc->config.pipe_bpp) {
@@ -7444,8 +7491,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       if (!intel_display_power_enabled(dev_priv,
-                                        POWER_DOMAIN_PIPE(crtc->pipe)))
+       if (!intel_display_power_is_enabled(dev_priv,
+                                           POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;
 
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7638,7 +7685,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 {
        uint32_t val;
-       unsigned long irqflags;
 
        val = I915_READ(LCPLL_CTL);
 
@@ -7658,10 +7704,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
         * to call special forcewake code that doesn't touch runtime PM and
         * doesn't enable the forcewake delayed work.
         */
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       spin_lock_irq(&dev_priv->uncore.lock);
        if (dev_priv->uncore.forcewake_count++ == 0)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 
        if (val & LCPLL_POWER_DOWN_ALLOW) {
                val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7692,10 +7738,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
        }
 
        /* See the big comment above. */
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       spin_lock_irq(&dev_priv->uncore.lock);
        if (--dev_priv->uncore.forcewake_count == 0)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 }
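
The two spinlock conversions above rely on hsw_restore_lcpll() running in process context with interrupts enabled, which is what makes the plain _irq variants sufficient; the manual forcewake reference is still taken under the uncore lock exactly as before. A condensed sketch of that pattern, with an illustrative helper name:

    static void example_forcewake_get(struct drm_i915_private *dev_priv)
    {
            /* process context, interrupts known to be enabled */
            spin_lock_irq(&dev_priv->uncore.lock);
            if (dev_priv->uncore.forcewake_count++ == 0)
                    dev_priv->uncore.funcs.force_wake_get(dev_priv,
                                                          FORCEWAKE_ALL);
            spin_unlock_irq(&dev_priv->uncore.lock);
    }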
 
 /*
@@ -7824,7 +7870,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
         * DDI E. So just check whether this pipe is wired to DDI E and whether
         * the PCH transcoder is on.
         */
-       if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+       if (INTEL_INFO(dev)->gen < 9 &&
+           (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;
 
                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -7843,7 +7890,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        enum intel_display_power_domain pfit_domain;
        uint32_t tmp;
 
-       if (!intel_display_power_enabled(dev_priv,
+       if (!intel_display_power_is_enabled(dev_priv,
                                         POWER_DOMAIN_PIPE(crtc->pipe)))
                return false;
 
@@ -7872,7 +7919,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                        pipe_config->cpu_transcoder = TRANSCODER_EDP;
        }
 
-       if (!intel_display_power_enabled(dev_priv,
+       if (!intel_display_power_is_enabled(dev_priv,
                        POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
                return false;
 
@@ -7885,7 +7932,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        intel_get_pipe_timings(crtc, pipe_config);
 
        pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-       if (intel_display_power_enabled(dev_priv, pfit_domain))
+       if (intel_display_power_is_enabled(dev_priv, pfit_domain))
                ironlake_get_pfit_config(crtc, pipe_config);
 
        if (IS_HASWELL(dev))
@@ -8255,8 +8302,10 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
                intel_crtc->cursor_cntl = 0;
        }
 
-       if (intel_crtc->cursor_base != base)
+       if (intel_crtc->cursor_base != base) {
                I915_WRITE(_CURABASE, base);
+               intel_crtc->cursor_base = base;
+       }
 
        if (intel_crtc->cursor_size != size) {
                I915_WRITE(CURSIZE, size);
@@ -8296,9 +8345,10 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
                                return;
                }
                cntl |= pipe << 28; /* Connect to correct pipe */
+
+               if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                       cntl |= CURSOR_PIPE_CSC_ENABLE;
        }
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-               cntl |= CURSOR_PIPE_CSC_ENABLE;
 
        if (intel_crtc->cursor_cntl != cntl) {
                I915_WRITE(CURCNTR(pipe), cntl);
@@ -8309,6 +8359,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
        /* and commit changes on next vblank */
        I915_WRITE(CURBASE(pipe), base);
        POSTING_READ(CURBASE(pipe));
+
+       intel_crtc->cursor_base = base;
 }
 
 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -8359,7 +8411,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
                i845_update_cursor(crtc, base);
        else
                i9xx_update_cursor(crtc, base);
-       intel_crtc->cursor_base = base;
 }
 
 static bool cursor_size_ok(struct drm_device *dev,
@@ -9023,35 +9074,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        return mode;
 }
 
-static void intel_increase_pllclock(struct drm_device *dev,
-                                   enum pipe pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int dpll_reg = DPLL(pipe);
-       int dpll;
-
-       if (!HAS_GMCH_DISPLAY(dev))
-               return;
-
-       if (!dev_priv->lvds_downclock_avail)
-               return;
-
-       dpll = I915_READ(dpll_reg);
-       if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-               DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
-               assert_panel_unlocked(dev_priv, pipe);
-
-               dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-               I915_WRITE(dpll_reg, dpll);
-               intel_wait_for_vblank(dev, pipe);
-
-               dpll = I915_READ(dpll_reg);
-               if (dpll & DISPLAY_RATE_SELECT_FPA1)
-                       DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-       }
-}
-
 static void intel_decrease_pllclock(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -9127,199 +9149,16 @@ out:
        intel_runtime_pm_put(dev_priv);
 }
 
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used, for example, to keep the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
-                              unsigned frontbuffer_bits,
-                              struct intel_engine_cs *ring)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe pipe;
-
-       if (!i915.powersave)
-               return;
-
-       for_each_pipe(dev_priv, pipe) {
-               if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
-                       continue;
-
-               intel_increase_pllclock(dev, pipe);
-               if (ring && intel_fbc_enabled(dev))
-                       ring->fbc_dirty = true;
-       }
-}
-
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                            struct intel_engine_cs *ring)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       if (!obj->frontbuffer_bits)
-               return;
-
-       if (ring) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
-               dev_priv->fb_tracking.busy_bits
-                       |= obj->frontbuffer_bits;
-               dev_priv->fb_tracking.flip_bits
-                       &= ~obj->frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
-       intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called every time rendering on the given planes has
- * completed and frontbuffer caching can be started again. Flushes will get
- * delayed if they're blocked by some outstanding asynchronous rendering.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flush(struct drm_device *dev,
-                            unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* Delay flushing when rings are still busy.*/
-       mutex_lock(&dev_priv->fb_tracking.lock);
-       frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
-
-       intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
-
-       intel_edp_psr_flush(dev, frontbuffer_bits);
-
-       /*
-        * FIXME: Unconditional fbc flushing here is a rather gross hack and
-        * needs to be reworked into a proper frontbuffer tracking scheme like
-        * psr employs.
-        */
-       if (IS_BROADWELL(dev))
-               gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
-}
-
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                       bool retire)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned frontbuffer_bits;
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-       if (!obj->frontbuffer_bits)
-               return;
-
-       frontbuffer_bits = obj->frontbuffer_bits;
-
-       if (retire) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
-               /* Filter out new bits since rendering started. */
-               frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
-               dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
-/**
- * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
-                                   unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       mutex_lock(&dev_priv->fb_tracking.lock);
-       dev_priv->fb_tracking.flip_bits
-               |= frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
-                                    unsigned frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       mutex_lock(&dev_priv->fb_tracking.lock);
-       /* Mask any cancelled flips. */
-       frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
-       dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
-
-       intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
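
The tracking helpers deleted above are being moved out of intel_display.c; the intel_drv.h hunk further down regroups their declarations under intel_frontbuffer.c. Their kerneldoc spells out the intended call sequence around an asynchronous flip, roughly as follows (a sketch based on the comments above, the wrapper name is illustrative):

    static void example_async_flip(struct drm_device *dev,
                                   unsigned frontbuffer_bits)
    {
            /* the flip is scheduled: park any flush for these planes */
            intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

            /* ... queue the flip to the hardware ... */

            /*
             * Once the flip is latched (next vblank) the delayed flush
             * runs, unless an invalidate cancelled it in between.
             */
            intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
    }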
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct intel_unpin_work *work;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        work = intel_crtc->unpin_work;
        intel_crtc->unpin_work = NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        if (work) {
                cancel_work_sync(&work->work);
@@ -9365,6 +9204,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
        if (intel_crtc == NULL)
                return;
 
+       /*
+        * This is called both by irq handlers and the reset code (to complete
+        * lost pageflips) so it needs the full irqsave spinlocks.
+        */
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
 
@@ -9446,7 +9289,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
 
-       /* NB: An MMIO update of the plane base pointer will also
+
+       /*
+        * This is called both by irq handlers and the reset code (to complete
+        * lost pageflips) so it needs the full irqsave spinlocks.
+        *
+        * NB: An MMIO update of the plane base pointer will also
         * generate a page-flip completion irq, i.e. every modeset
         * is also accompanied by a spurious intel_prepare_page_flip().
         */
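
The comments added here and in do_intel_finish_page_flip() above state the locking rule the rest of this patch follows: paths reachable both from irq handlers and from process context (the reset code completing lost pageflips) must keep spin_lock_irqsave(), while code that only ever runs in process context with interrupts enabled is switched to the cheaper spin_lock_irq(). A small sketch of the two cases, with placeholder bodies:

    /* reachable from the irq handler and from the reset path: save flags */
    static void example_complete_flip(struct drm_device *dev)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->event_lock, flags);
            /* ... complete the pending unpin_work ... */
            spin_unlock_irqrestore(&dev->event_lock, flags);
    }

    /* ioctl/process context only: interrupts are known to be enabled */
    static void example_queue_flip(struct drm_device *dev)
    {
            spin_lock_irq(&dev->event_lock);
            /* ... install the new unpin_work ... */
            spin_unlock_irq(&dev->event_lock);
    }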
@@ -9821,7 +9669,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long irq_flags;
        int ret;
 
        if (WARN_ON(intel_crtc->mmio_flip.seqno))
@@ -9835,10 +9682,10 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
                return 0;
        }
 
-       spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+       spin_lock_irq(&dev_priv->mmio_flip_lock);
        intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
        intel_crtc->mmio_flip.ring_id = obj->ring->id;
-       spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+       spin_unlock_irq(&dev_priv->mmio_flip_lock);
 
        /*
         * Double check to catch cases where irq fired before
@@ -9903,18 +9750,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned long flags;
+
+       WARN_ON(!in_irq());
 
        if (crtc == NULL)
                return;
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock(&dev->event_lock);
        if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
                WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
                         intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
                page_flip_completed(intel_crtc);
        }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock(&dev->event_lock);
 }
 
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
@@ -9930,7 +9778,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        enum pipe pipe = intel_crtc->pipe;
        struct intel_unpin_work *work;
        struct intel_engine_cs *ring;
-       unsigned long flags;
        int ret;
 
        /*
@@ -9971,7 +9818,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                goto free_work;
 
        /* We borrow the event spin lock for protecting unpin_work */
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        if (intel_crtc->unpin_work) {
                /* Before declaring the flip queue wedged, check if
                 * the hardware completed the operation behind our backs.
@@ -9981,7 +9828,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                        page_flip_completed(intel_crtc);
                } else {
                        DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       spin_unlock_irq(&dev->event_lock);
 
                        drm_crtc_vblank_put(crtc);
                        kfree(work);
@@ -9989,7 +9836,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                }
        }
        intel_crtc->unpin_work = work;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
                flush_workqueue(dev_priv->wq);
@@ -10076,9 +9923,9 @@ cleanup_pending:
        mutex_unlock(&dev->struct_mutex);
 
 cleanup:
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        intel_crtc->unpin_work = NULL;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
 
        drm_crtc_vblank_put(crtc);
 free_work:
@@ -10089,9 +9936,9 @@ out_hang:
                intel_crtc_wait_for_pending_flips(crtc);
                ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
                if (ret == 0 && event) {
-                       spin_lock_irqsave(&dev->event_lock, flags);
+                       spin_lock_irq(&dev->event_lock);
                        drm_send_vblank_event(dev, pipe, event);
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
+                       spin_unlock_irq(&dev->event_lock);
                }
        }
        return ret;
@@ -11677,7 +11524,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 {
        uint32_t val;
 
-       if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
                return false;
 
        val = I915_READ(PCH_DPLL(pll->id));
@@ -11811,89 +11658,37 @@ disable_unpin:
 }
 
 static int
-intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
-                            struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                            unsigned int crtc_w, unsigned int crtc_h,
-                            uint32_t src_x, uint32_t src_y,
-                            uint32_t src_w, uint32_t src_h)
+intel_check_primary_plane(struct drm_plane *plane,
+                         struct intel_plane_state *state)
+{
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_rect *dest = &state->dst;
+       struct drm_rect *src = &state->src;
+       const struct drm_rect *clip = &state->clip;
+
+       return drm_plane_helper_check_update(plane, crtc, fb,
+                                           src, dest, clip,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           false, true, &state->visible);
+}
+
+static int
+intel_commit_primary_plane(struct drm_plane *plane,
+                          struct intel_plane_state *state)
 {
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
-       struct drm_rect dest = {
-               /* integer pixels */
-               .x1 = crtc_x,
-               .y1 = crtc_y,
-               .x2 = crtc_x + crtc_w,
-               .y2 = crtc_y + crtc_h,
-       };
-       struct drm_rect src = {
-               /* 16.16 fixed point */
-               .x1 = src_x,
-               .y1 = src_y,
-               .x2 = src_x + src_w,
-               .y2 = src_y + src_h,
-       };
-       const struct drm_rect clip = {
-               /* integer pixels */
-               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
-               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
-       };
-       const struct {
-               int crtc_x, crtc_y;
-               unsigned int crtc_w, crtc_h;
-               uint32_t src_x, src_y, src_w, src_h;
-       } orig = {
-               .crtc_x = crtc_x,
-               .crtc_y = crtc_y,
-               .crtc_w = crtc_w,
-               .crtc_h = crtc_h,
-               .src_x = src_x,
-               .src_y = src_y,
-               .src_w = src_w,
-               .src_h = src_h,
-       };
        struct intel_plane *intel_plane = to_intel_plane(plane);
-       bool visible;
+       struct drm_rect *src = &state->src;
        int ret;
 
-       ret = drm_plane_helper_check_update(plane, crtc, fb,
-                                           &src, &dest, &clip,
-                                           DRM_PLANE_HELPER_NO_SCALING,
-                                           DRM_PLANE_HELPER_NO_SCALING,
-                                           false, true, &visible);
-
-       if (ret)
-               return ret;
-
-       /*
-        * If the CRTC isn't enabled, we're just pinning the framebuffer,
-        * updating the fb pointer, and returning without touching the
-        * hardware.  This allows us to later do a drmModeSetCrtc with fb=-1 to
-        * turn on the display with all planes setup as desired.
-        */
-       if (!crtc->enabled) {
-               mutex_lock(&dev->struct_mutex);
-
-               /*
-                * If we already called setplane while the crtc was disabled,
-                * we may have an fb pinned; unpin it.
-                */
-               if (plane->fb)
-                       intel_unpin_fb_obj(old_obj);
-
-               i915_gem_track_fb(old_obj, obj,
-                                 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
-
-               /* Pin and return without programming hardware */
-               ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-               mutex_unlock(&dev->struct_mutex);
-
-               return ret;
-       }
-
        intel_crtc_wait_for_pending_flips(crtc);
 
        /*
@@ -11902,7 +11697,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
         * happens if userspace explicitly disables the plane by passing fb=0
         * because plane->fb still gets set and pinned.
         */
-       if (!visible) {
+       if (!state->visible) {
                mutex_lock(&dev->struct_mutex);
 
                /*
@@ -11949,7 +11744,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
                                intel_disable_fbc(dev);
                        }
                }
-               ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
+               ret = intel_pipe_set_base(crtc, src->x1, src->y1, fb);
                if (ret)
                        return ret;
 
@@ -11957,19 +11752,62 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
                        intel_enable_primary_hw_plane(plane, crtc);
        }
 
-       intel_plane->crtc_x = orig.crtc_x;
-       intel_plane->crtc_y = orig.crtc_y;
-       intel_plane->crtc_w = orig.crtc_w;
-       intel_plane->crtc_h = orig.crtc_h;
-       intel_plane->src_x = orig.src_x;
-       intel_plane->src_y = orig.src_y;
-       intel_plane->src_w = orig.src_w;
-       intel_plane->src_h = orig.src_h;
+       intel_plane->crtc_x = state->orig_dst.x1;
+       intel_plane->crtc_y = state->orig_dst.y1;
+       intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+       intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+       intel_plane->src_x = state->orig_src.x1;
+       intel_plane->src_y = state->orig_src.y1;
+       intel_plane->src_w = drm_rect_width(&state->orig_src);
+       intel_plane->src_h = drm_rect_height(&state->orig_src);
        intel_plane->obj = obj;
 
        return 0;
 }
 
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                            unsigned int crtc_w, unsigned int crtc_h,
+                            uint32_t src_x, uint32_t src_y,
+                            uint32_t src_w, uint32_t src_h)
+{
+       struct intel_plane_state state;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int ret;
+
+       state.crtc = crtc;
+       state.fb = fb;
+
+       /* sample coordinates in 16.16 fixed point */
+       state.src.x1 = src_x;
+       state.src.x2 = src_x + src_w;
+       state.src.y1 = src_y;
+       state.src.y2 = src_y + src_h;
+
+       /* integer pixels */
+       state.dst.x1 = crtc_x;
+       state.dst.x2 = crtc_x + crtc_w;
+       state.dst.y1 = crtc_y;
+       state.dst.y2 = crtc_y + crtc_h;
+
+       state.clip.x1 = 0;
+       state.clip.y1 = 0;
+       state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+       state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+       state.orig_src = state.src;
+       state.orig_dst = state.dst;
+
+       ret = intel_check_primary_plane(plane, &state);
+       if (ret)
+               return ret;
+
+       intel_commit_primary_plane(plane, &state);
+
+       return 0;
+}
+
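
The legacy ->update_plane entry point above now only fills an intel_plane_state and runs a side-effect-free check step before the commit step touches any hardware, mirroring the check/commit split already used by drm_plane_helper_check_update(). A condensed restatement of that flow (only the two helpers above are real; the wrapper name is illustrative):

    /* condensed restatement of intel_primary_plane_setplane() above */
    static int example_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct intel_plane_state *state)
    {
            int ret;

            state->crtc = crtc;
            state->fb = fb;
            /* state->src is 16.16 fixed point, dst/clip are integer pixels */

            ret = intel_check_primary_plane(plane, state);  /* no hw touched */
            if (ret)
                    return ret;

            return intel_commit_primary_plane(plane, state); /* programs hw */
    }

Note that the real wrapper above returns 0 once the check has passed; only the check step can reject the update.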
 /* Common destruction function for both primary and cursor planes */
 static void intel_plane_destroy(struct drm_plane *plane)
 {
@@ -12044,51 +11882,41 @@ intel_cursor_plane_disable(struct drm_plane *plane)
 }
 
 static int
-intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
-                         struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                         unsigned int crtc_w, unsigned int crtc_h,
-                         uint32_t src_x, uint32_t src_y,
-                         uint32_t src_w, uint32_t src_h)
+intel_check_cursor_plane(struct drm_plane *plane,
+                        struct intel_plane_state *state)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
-       struct drm_rect dest = {
-               /* integer pixels */
-               .x1 = crtc_x,
-               .y1 = crtc_y,
-               .x2 = crtc_x + crtc_w,
-               .y2 = crtc_y + crtc_h,
-       };
-       struct drm_rect src = {
-               /* 16.16 fixed point */
-               .x1 = src_x,
-               .y1 = src_y,
-               .x2 = src_x + src_w,
-               .y2 = src_y + src_h,
-       };
-       const struct drm_rect clip = {
-               /* integer pixels */
-               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
-               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
-       };
-       bool visible;
-       int ret;
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_rect *dest = &state->dst;
+       struct drm_rect *src = &state->src;
+       const struct drm_rect *clip = &state->clip;
 
-       ret = drm_plane_helper_check_update(plane, crtc, fb,
-                                           &src, &dest, &clip,
+       return drm_plane_helper_check_update(plane, crtc, fb,
+                                           src, dest, clip,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
-                                           true, true, &visible);
-       if (ret)
-               return ret;
+                                           true, true, &state->visible);
+}
 
-       crtc->cursor_x = crtc_x;
-       crtc->cursor_y = crtc_y;
+static int
+intel_commit_cursor_plane(struct drm_plane *plane,
+                         struct intel_plane_state *state)
+{
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
+       int crtc_w, crtc_h;
+
+       crtc->cursor_x = state->orig_dst.x1;
+       crtc->cursor_y = state->orig_dst.y1;
        if (fb != crtc->cursor->fb) {
+               crtc_w = drm_rect_width(&state->orig_dst);
+               crtc_h = drm_rect_height(&state->orig_dst);
                return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
        } else {
-               intel_crtc_update_cursor(crtc, visible);
+               intel_crtc_update_cursor(crtc, state->visible);
 
                intel_frontbuffer_flip(crtc->dev,
                                       INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
@@ -12096,6 +11924,48 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                return 0;
        }
 }
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+                         struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                         unsigned int crtc_w, unsigned int crtc_h,
+                         uint32_t src_x, uint32_t src_y,
+                         uint32_t src_w, uint32_t src_h)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane_state state;
+       int ret;
+
+       state.crtc = crtc;
+       state.fb = fb;
+
+       /* sample coordinates in 16.16 fixed point */
+       state.src.x1 = src_x;
+       state.src.x2 = src_x + src_w;
+       state.src.y1 = src_y;
+       state.src.y2 = src_y + src_h;
+
+       /* integer pixels */
+       state.dst.x1 = crtc_x;
+       state.dst.x2 = crtc_x + crtc_w;
+       state.dst.y1 = crtc_y;
+       state.dst.y2 = crtc_y + crtc_h;
+
+       state.clip.x1 = 0;
+       state.clip.y1 = 0;
+       state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+       state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+
+       state.orig_src = state.src;
+       state.orig_dst = state.dst;
+
+       ret = intel_check_cursor_plane(plane, &state);
+       if (ret)
+               return ret;
+
+       return intel_commit_cursor_plane(plane, &state);
+}
+
 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .update_plane = intel_cursor_plane_update,
        .disable_plane = intel_cursor_plane_disable,
@@ -12284,6 +12154,9 @@ static bool intel_crt_present(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (INTEL_INFO(dev)->gen >= 9)
+               return false;
+
        if (IS_ULT(dev))
                return false;
 
@@ -12636,8 +12509,12 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
                dev_priv->display.off = ironlake_crtc_off;
-               dev_priv->display.update_primary_plane =
-                       ironlake_update_primary_plane;
+               if (INTEL_INFO(dev)->gen >= 9)
+                       dev_priv->display.update_primary_plane =
+                               skylake_update_primary_plane;
+               else
+                       dev_priv->display.update_primary_plane =
+                               ironlake_update_primary_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
                dev_priv->display.get_plane_config = ironlake_get_plane_config;
@@ -12721,6 +12598,10 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.modeset_global_resources =
                        valleyview_modeset_global_resources;
                dev_priv->display.write_eld = ironlake_write_eld;
+       } else if (INTEL_INFO(dev)->gen >= 9) {
+               dev_priv->display.write_eld = haswell_write_eld;
+               dev_priv->display.modeset_global_resources =
+                       haswell_modeset_global_resources;
        }
 
        /* Default just returns -ENODEV to indicate unsupported */
@@ -12948,11 +12829,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
        intel_enable_gt_powersave(dev);
 }
 
-void intel_modeset_suspend_hw(struct drm_device *dev)
-{
-       intel_suspend_hw(dev);
-}
-
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13288,7 +13164,7 @@ void i915_redisable_vga(struct drm_device *dev)
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
-       if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
                return;
 
        i915_redisable_vga_power_on(dev);
@@ -13509,9 +13385,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
         * Too much stuff here (turning off rps, connectors, ...) would
         * experience fancy races otherwise.
         */
-       drm_irq_uninstall(dev);
-       intel_hpd_cancel_work(dev_priv);
-       dev_priv->pm._irqs_disabled = true;
+       intel_irq_uninstall(dev_priv);
 
        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -13666,8 +13540,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 
        for_each_pipe(dev_priv, i) {
                error->pipe[i].power_domain_on =
-                       intel_display_power_enabled_unlocked(dev_priv,
-                                                          POWER_DOMAIN_PIPE(i));
+                       __intel_display_power_is_enabled(dev_priv,
+                                                        POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;
 
@@ -13702,7 +13576,7 @@ intel_display_capture_error_state(struct drm_device *dev)
                enum transcoder cpu_transcoder = transcoders[i];
 
                error->transcoder[i].power_domain_on =
-                       intel_display_power_enabled_unlocked(dev_priv,
+                       __intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;
@@ -13786,9 +13660,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
 
        for_each_intel_crtc(dev, crtc) {
                struct intel_unpin_work *work;
-               unsigned long irqflags;
 
-               spin_lock_irqsave(&dev->event_lock, irqflags);
+               spin_lock_irq(&dev->event_lock);
 
                work = crtc->unpin_work;
 
@@ -13798,6 +13671,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
                        work->event = NULL;
                }
 
-               spin_unlock_irqrestore(&dev->event_lock, irqflags);
+               spin_unlock_irq(&dev->event_lock);
        }
 }
index f6a3fdd..64c8e04 100644 (file)
@@ -225,7 +225,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
 }
 
 static uint32_t
-pack_aux(uint8_t *src, int src_bytes)
+pack_aux(const uint8_t *src, int src_bytes)
 {
        int     i;
        uint32_t v = 0;
@@ -661,6 +661,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
        return index ? 0 : 100;
 }
 
+static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+       /*
+        * SKL doesn't need us to program the AUX clock divider (the hardware
+        * derives the clock from CDCLK automatically). We still implement the
+        * get_aux_clock_divider vfunc to plug into the existing code.
+        */
+       return index ? 0 : 1;
+}
+
 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
@@ -691,9 +701,24 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
 }
 
+static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+                                     bool has_aux_irq,
+                                     int send_bytes,
+                                     uint32_t unused)
+{
+       return DP_AUX_CH_CTL_SEND_BUSY |
+              DP_AUX_CH_CTL_DONE |
+              (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+              DP_AUX_CH_CTL_TIME_OUT_ERROR |
+              DP_AUX_CH_CTL_TIME_OUT_1600us |
+              DP_AUX_CH_CTL_RECEIVE_ERROR |
+              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+}
+
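
Both SKL helpers exist so gen9 can reuse the existing vfunc-based AUX path: the divider hook returns a dummy value, and the send-ctl hook drops the divider and precharge fields in favour of the SKL-specific sync pulse count. They are wired up in intel_dp_init_connector() further down in this diff; condensed, that selection ends up as:

    if (INTEL_INFO(dev)->gen >= 9) {
            intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
            intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
    } else {
            /* VLV/HSW/BDW/i9xx keep their existing divider helpers */
            intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
    }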
 static int
 intel_dp_aux_ch(struct intel_dp *intel_dp,
-               uint8_t *send, int send_bytes,
+               const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -925,7 +950,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
                BUG();
        }
 
-       if (!HAS_DDI(dev))
+       /*
+        * The AUX_CTL register is usually DP_CTL + 0x10.
+        *
+        * On Haswell and Broadwell though:
+        *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
+        *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
+        *
+        * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
+        */
+       if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
                intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
        intel_dp->aux.name = name;
@@ -1819,7 +1853,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
        u32 tmp;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(intel_dp->output_reg);
@@ -1995,10 +2029,8 @@ static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
        POSTING_READ(ctl_reg);
 }
 
-static void intel_edp_psr_setup(struct intel_dp *intel_dp)
+static void intel_edp_psr_setup_vsc(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_vsc_psr psr_vsc;
 
        /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
@@ -2008,10 +2040,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
        psr_vsc.sdp_header.HB2 = 0x2;
        psr_vsc.sdp_header.HB3 = 0x8;
        intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
-
-       /* Avoid continuous PSR exit by masking memup and hpd */
-       I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-                  EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 }
 
 static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
@@ -2021,8 +2049,17 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t aux_clock_divider;
        int precharge = 0x3;
-       int msg_size = 5;       /* Header(4) + Message(1) */
        bool only_standby = false;
+       static const uint8_t aux_msg[] = {
+               [0] = DP_AUX_NATIVE_WRITE << 4,
+               [1] = DP_SET_POWER >> 8,
+               [2] = DP_SET_POWER & 0xff,
+               [3] = 1 - 1,
+               [4] = DP_SET_POWER_D0,
+       };
+       int i;
+
+       BUILD_BUG_ON(sizeof(aux_msg) > 20);
 
        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
@@ -2038,11 +2075,13 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
                                   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 
        /* Setup AUX registers */
-       I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
-       I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
+       for (i = 0; i < sizeof(aux_msg); i += 4)
+               I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
+                          pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+
        I915_WRITE(EDP_PSR_AUX_CTL(dev),
                   DP_AUX_CH_CTL_TIME_OUT_400us |
-                  (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+                  (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
                   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
                   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
 }
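
The de-magiced PSR setup above now builds a real AUX message: a native write of DP_SET_POWER_D0 to DP_SET_POWER, where aux_msg[3] = 1 - 1 is the standard length-minus-one field of an AUX request header. pack_aux() (constified earlier in this file's hunks, body not shown here) folds up to four message bytes MSB-first into each 32-bit EDP_PSR_AUX_DATA register; a sketch consistent with how the loop above uses it:

    static uint32_t pack_aux(const uint8_t *src, int src_bytes)
    {
            uint32_t v = 0;
            int i;

            if (src_bytes > 4)
                    src_bytes = 4;
            /* the first message byte lands in the most significant byte */
            for (i = 0; i < src_bytes; i++)
                    v |= ((uint32_t)src[i]) << ((3 - i) * 8);

            return v;
    }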
@@ -2131,10 +2170,7 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
        WARN_ON(dev_priv->psr.active);
        lockdep_assert_held(&dev_priv->psr.lock);
 
-       /* Enable PSR on the panel */
-       intel_edp_psr_enable_sink(intel_dp);
-
-       /* Enable PSR on the host */
+       /* Enable/Re-enable PSR on the host */
        intel_edp_psr_enable_source(intel_dp);
 
        dev_priv->psr.active = true;
@@ -2158,17 +2194,25 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp)
        mutex_lock(&dev_priv->psr.lock);
        if (dev_priv->psr.enabled) {
                DRM_DEBUG_KMS("PSR already in use\n");
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
+               goto unlock;
        }
 
+       if (!intel_edp_psr_match_conditions(intel_dp))
+               goto unlock;
+
        dev_priv->psr.busy_frontbuffer_bits = 0;
 
-       /* Setup PSR once */
-       intel_edp_psr_setup(intel_dp);
+       intel_edp_psr_setup_vsc(intel_dp);
 
-       if (intel_edp_psr_match_conditions(intel_dp))
-               dev_priv->psr.enabled = intel_dp;
+       /* Avoid continuous PSR exit by masking memup and hpd */
+       I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+                  EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+
+       /* Enable PSR on the panel */
+       intel_edp_psr_enable_sink(intel_dp);
+
+       dev_priv->psr.enabled = intel_dp;
+unlock:
        mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -2209,6 +2253,17 @@ static void intel_edp_psr_work(struct work_struct *work)
                container_of(work, typeof(*dev_priv), psr.work.work);
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
 
+       /* We have to make sure PSR is ready for re-enable
+        * otherwise it stays disabled until the next full enable/disable cycle.
+        * PSR might take some time to get fully disabled
+        * and be ready for re-enable.
+        */
+       if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+                     EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+               DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+               return;
+       }
+
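
wait_for() here is the driver's polling helper: it keeps re-reading EDP_PSR_STATUS_CTL until the state field clears or the 50 ms budget expires, and returns non-zero on timeout so the work item bails out instead of re-enabling PSR on a link that is not idle yet. A rough open-coded equivalent, for illustration only (the real macro handles sleeping and timeout accounting somewhat differently):

    static bool example_psr_idle(struct drm_i915_private *dev_priv)
    {
            unsigned long timeout = jiffies + msecs_to_jiffies(50);

            do {
                    if ((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
                         EDP_PSR_STATUS_STATE_MASK) == 0)
                            return true;    /* fully idle, safe to re-enable */
                    usleep_range(1000, 2000);
            } while (time_before(jiffies, timeout));

            return false;
    }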
        mutex_lock(&dev_priv->psr.lock);
        intel_dp = dev_priv->psr.enabled;
 
@@ -2680,6 +2735,15 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
 
        mutex_lock(&dev_priv->dpio_lock);
 
+       /* allow hardware to manage TX FIFO reset source */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
        /* Deassert soft data lane reset*/
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -2836,7 +2900,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       if (IS_VALLEYVIEW(dev))
+       if (INTEL_INFO(dev)->gen >= 9)
+               return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+       else if (IS_VALLEYVIEW(dev))
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else if (IS_GEN7(dev) && port == PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2852,7 +2918,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (INTEL_INFO(dev)->gen >= 9) {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_3;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
+               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
+               default:
+                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
+               }
+       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
@@ -3088,12 +3165,26 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
        /* Clear calc init */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
        /* Program swing deemph */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
@@ -3334,7 +3425,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
        uint32_t signal_levels, mask;
        uint8_t train_set = intel_dp->train_set[0];
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
                signal_levels = intel_hsw_signal_levels(train_set);
                mask = DDI_BUF_EMP_MASK;
        } else if (IS_CHERRYVIEW(dev)) {
@@ -3801,26 +3892,48 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
-       u8 buf[1];
+       u8 buf;
+       int test_crc_count;
+       int attempts = 6;
 
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
                return -EIO;
 
-       if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+       if (!(buf & DP_TEST_CRC_SUPPORTED))
                return -ENOTTY;
 
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+               return -EIO;
+
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-                              DP_TEST_SINK_START) < 0)
+                               buf | DP_TEST_SINK_START) < 0)
+               return -EIO;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
                return -EIO;
+       test_crc_count = buf & DP_TEST_COUNT_MASK;
 
-       /* Wait 2 vblanks to be sure we will have the correct CRC value */
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
+       do {
+               if (drm_dp_dpcd_readb(&intel_dp->aux,
+                                     DP_TEST_SINK_MISC, &buf) < 0)
+                       return -EIO;
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
+       } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
+
+       if (attempts == 0) {
+               DRM_ERROR("Panel is unable to calculate CRC after 6 vblanks\n");
+               return -EIO;
+       }
 
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
                return -EIO;
 
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+               return -EIO;
+       if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+                              buf & ~DP_TEST_SINK_START) < 0)
+               return -EIO;
+
        return 0;
 }
 
@@ -5057,7 +5170,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        intel_dp->pps_pipe = INVALID_PIPE;
 
        /* intel_dp vfuncs */
-       if (IS_VALLEYVIEW(dev))
+       if (INTEL_INFO(dev)->gen >= 9)
+               intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+       else if (IS_VALLEYVIEW(dev))
                intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5066,7 +5181,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        else
                intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
 
-       intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+       if (INTEL_INFO(dev)->gen >= 9)
+               intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+       else
+               intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
 
        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
index ba71522..94993d2 100644 (file)
@@ -34,6 +34,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_rect.h>
 
 #define DIV_ROUND_CLOSEST_ULL(ll, d)   \
 ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
@@ -240,6 +241,17 @@ typedef struct dpll {
        int     p;
 } intel_clock_t;
 
+struct intel_plane_state {
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+       struct drm_rect src;
+       struct drm_rect dst;
+       struct drm_rect clip;
+       struct drm_rect orig_src;
+       struct drm_rect orig_dst;
+       bool visible;
+};
+
 struct intel_plane_config {
        bool tiled;
        int size;
@@ -734,6 +746,14 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
        return container_of(intel_hdmi, struct intel_digital_port, hdmi);
 }
 
+/*
+ * Returns the number of planes for this pipe, i.e. the number of sprites + 1
+ * (primary plane). The cursor plane is not counted.
+ */
+static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
+{
+       return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
+}
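
A hypothetical use of the new helper, iterating over the primary plane and all sprites of a pipe (the loop and callee are illustrative only):

    int plane;

    for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
            example_update_plane_wm(intel_crtc, plane);    /* 0 = primary */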
 
 /* i915_irq.c */
 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
@@ -747,15 +767,15 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
-void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
 static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
 {
        /*
         * We only use drm_irq_uninstall() at unload and VT switch, so
         * this is the only thing we need to check.
         */
-       return !dev_priv->pm._irqs_disabled;
+       return dev_priv->pm.irqs_enabled;
 }
 
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
@@ -792,11 +812,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
                         struct intel_crtc_config *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 
-/* intel_display.c */
-const char *intel_output_name(int output);
-bool intel_has_pending_fb_unpin(struct drm_device *dev);
-int intel_pch_rawclk(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
+/* intel_frontbuffer.c */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
                             struct intel_engine_cs *ring);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
@@ -806,7 +822,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 void intel_frontbuffer_flush(struct drm_device *dev,
                             unsigned frontbuffer_bits);
 /**
- * intel_frontbuffer_flip - prepare frontbuffer flip
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
@@ -824,6 +840,13 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 }
 
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+
+
+/* intel_display.c */
+const char *intel_output_name(int output);
+bool intel_has_pending_fb_unpin(struct drm_device *dev);
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 void intel_crtc_control(struct drm_crtc *crtc, bool enable);
@@ -844,7 +867,11 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
                                             enum pipe pipe);
-void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+static inline void
+intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+       drm_wait_one_vblank(dev, pipe);
+}
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport);
@@ -878,6 +905,8 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
 void intel_put_shared_dpll(struct intel_crtc *crtc);
 
 /* modesetting asserts */
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+                          enum pipe pipe);
 void assert_pll(struct drm_i915_private *dev_priv,
                enum pipe pipe, bool state);
 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -908,7 +937,6 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
-void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
@@ -1055,6 +1083,28 @@ extern struct drm_display_mode *intel_find_panel_downclock(
                                struct drm_display_mode *fixed_mode,
                                struct drm_connector *connector);
 
+/* intel_runtime_pm.c */
+int intel_power_domains_init(struct drm_i915_private *);
+void intel_power_domains_fini(struct drm_i915_private *);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                   enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                     enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+
+void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
+
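The block above follows the driver's reference-counted get/put pattern. Below is a minimal sketch of how a caller would bracket a hardware access with these helpers; the function name and register parameter are invented for illustration, and intel_display_power_get() is assumed to take its own runtime PM reference, as the code it replaces in intel_pm.c did.

/* Illustrative only: hold a display power reference around a register read. */
static u32 example_read_with_power(struct drm_i915_private *dev_priv,
                                   enum intel_display_power_domain domain,
                                   u32 reg)
{
        u32 val;

        intel_display_power_get(dev_priv, domain);
        val = I915_READ(reg);
        intel_display_power_put(dev_priv, domain);

        return val;
}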
 /* intel_pm.c */
 void intel_init_clock_gating(struct drm_device *dev);
 void intel_suspend_hw(struct drm_device *dev);
@@ -1072,17 +1122,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
-int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_remove(struct drm_i915_private *);
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
-                                enum intel_display_power_domain domain);
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
-                                         enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain);
-void intel_display_power_put(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
 void intel_init_gt_powersave(struct drm_device *dev);
 void intel_cleanup_gt_powersave(struct drm_device *dev);
 void intel_enable_gt_powersave(struct drm_device *dev);
@@ -1093,13 +1132,6 @@ void ironlake_teardown_rc6(struct drm_device *dev);
 void gen6_update_ring_freq(struct drm_device *dev);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 
 
index 5bd9e09..0b18407 100644 (file)
@@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
        DRM_DEBUG_KMS("\n");
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        /* XXX: this only works for one DSI output */
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
new file mode 100644 (file)
index 0000000..58cf2e6
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+/**
+ * DOC: frontbuffer tracking
+ *
+ * Many features require us to track changes to the currently active
+ * frontbuffer, especially rendering targeted at the frontbuffer.
+ *
+ * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
+ * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
+ * then called when the contents of the frontbuffer are invalidated, when
+ * frontbuffer rendering has stopped again to flush out all the changes and when
+ * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
+ * into the relevant places and filter for the frontbuffer slots that they are
+ * interested in.
+ *
+ * On a high level there are two types of powersaving features. The first type
+ * works like a special cache (FBC and PSR) and is interested in when it should
+ * stop caching and when to restart caching. This is done by placing callbacks
+ * into the invalidate and the flush functions: at invalidate time the caching
+ * must be stopped and at flush time it can be restarted. Such features may also
+ * need to know when the frontbuffer changes (e.g. when the hw doesn't initiate
+ * an invalidate and flush on its own), which can be achieved by placing
+ * callbacks into the flip functions.
+ *
+ * The other type of display power saving feature only cares about busyness
+ * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
+ * busyness. There is no direct way to detect idleness. Instead a delayed work
+ * acting as an idle timer should be started from the flush and flip functions
+ * and cancelled as soon as busyness is detected.
+ *
+ * Note that there's also an older frontbuffer activity tracking scheme which
+ * just tracks general activity. This is done by the various mark_busy and
+ * mark_idle functions. For display power management features, using these
+ * functions is deprecated and they should be avoided.
+ */
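As a concrete illustration of the busyness-only pattern described above, a hypothetical consumer might look roughly like the sketch below. The my_drrs_* names and the one-second timeout are invented; only the hook points (invalidate, flush, flip) correspond to the functions in this file, and idle_work is assumed to have been set up with INIT_DELAYED_WORK() at init time.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical busyness-only consumer (DRRS-style); not part of this patch. */
struct my_drrs_state {
        struct delayed_work idle_work;  /* acts as the idle timer */
};

static void my_drrs_idle_work_fn(struct work_struct *work)
{
        /* no flush/flip seen for a while: safe to downclock here */
}

/* Called from the flush and flip hooks: (re)arm the idle timer. */
static void my_drrs_mark_busy(struct my_drrs_state *state)
{
        mod_delayed_work(system_wq, &state->idle_work, msecs_to_jiffies(1000));
}

/* Called from the invalidate hook: rendering is ongoing, stop the timer. */
static void my_drrs_cancel_idle(struct my_drrs_state *state)
{
        cancel_delayed_work_sync(&state->idle_work);
}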
+
+#include <drm/drmP.h>
+
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+static void intel_increase_pllclock(struct drm_device *dev,
+                                   enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int dpll_reg = DPLL(pipe);
+       int dpll;
+
+       if (!HAS_GMCH_DISPLAY(dev))
+               return;
+
+       if (!dev_priv->lvds_downclock_avail)
+               return;
+
+       dpll = I915_READ(dpll_reg);
+       if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+               DRM_DEBUG_DRIVER("upclocking LVDS\n");
+
+               assert_panel_unlocked(dev_priv, pipe);
+
+               dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+               I915_WRITE(dpll_reg, dpll);
+               intel_wait_for_vblank(dev, pipe);
+
+               dpll = I915_READ(dpll_reg);
+               if (dpll & DISPLAY_RATE_SELECT_FPA1)
+                       DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+       }
+}
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used e.g. to keep the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+                              unsigned frontbuffer_bits,
+                              struct intel_engine_cs *ring)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe;
+
+       if (!i915.powersave)
+               return;
+
+       for_each_pipe(dev_priv, pipe) {
+               if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
+                       continue;
+
+               intel_increase_pllclock(dev, pipe);
+               if (ring && intel_fbc_enabled(dev))
+                       ring->fbc_dirty = true;
+       }
+}
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                            struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (!obj->frontbuffer_bits)
+               return;
+
+       if (ring) {
+               mutex_lock(&dev_priv->fb_tracking.lock);
+               dev_priv->fb_tracking.busy_bits
+                       |= obj->frontbuffer_bits;
+               dev_priv->fb_tracking.flip_bits
+                       &= ~obj->frontbuffer_bits;
+               mutex_unlock(&dev_priv->fb_tracking.lock);
+       }
+
+       intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+       intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+                            unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Delay flushing when rings are still busy.*/
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+
+       intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+       intel_edp_psr_flush(dev, frontbuffer_bits);
+
+       /*
+        * FIXME: Unconditional fbc flushing here is a rather gross hack and
+        * needs to be reworked into a proper frontbuffer tracking scheme like
+        * psr employs.
+        */
+       if (dev_priv->fbc.need_sw_cache_clean) {
+               dev_priv->fbc.need_sw_cache_clean = false;
+               bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
+       }
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                       bool retire)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned frontbuffer_bits;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (!obj->frontbuffer_bits)
+               return;
+
+       frontbuffer_bits = obj->frontbuffer_bits;
+
+       if (retire) {
+               mutex_lock(&dev_priv->fb_tracking.lock);
+               /* Filter out new bits since rendering started. */
+               frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+               dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+               mutex_unlock(&dev_priv->fb_tracking.lock);
+       }
+
+       intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
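A minimal sketch of how the two object-level hooks above pair up around synchronous CPU rendering; the example function is hypothetical, and struct_mutex is assumed to be held, as the WARN_ONs in both hooks require.

/* Illustrative only: bracket a CPU write to a frontbuffer object. */
static void example_cpu_frontbuffer_write(struct drm_i915_gem_object *obj)
{
        /* ring == NULL: the rendering is synchronous, nothing to retire */
        intel_fb_obj_invalidate(obj, NULL);

        /* ... write pixels through a CPU mapping of obj ... */

        /* rendering has finished, FBC/PSR caching may resume */
        intel_fb_obj_flush(obj, false);
}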
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes given by
+ * @frontbuffer_bits. The actual frontbuffer flushing will be delayed until
+ * completion is signalled with intel_frontbuffer_flip_complete. If an
+ * invalidate happens in between, this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+                                   unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+       /* Remove stale busy bits due to the old buffer. */
+       dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+                                    unsigned frontbuffer_bits)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev_priv->fb_tracking.lock);
+       /* Mask any cancelled flips. */
+       frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+       dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+       mutex_unlock(&dev_priv->fb_tracking.lock);
+
+       intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
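For the asynchronous case, the two flip hooks above are meant to be used as a pair. A rough sketch, assuming the flip-done interrupt eventually fires; both example_* functions are invented for illustration.

/* Illustrative pairing of the flip hooks around an asynchronous flip. */
static void example_queue_flip(struct drm_device *dev,
                               unsigned frontbuffer_bits)
{
        /* frontbuffer flushing is deferred until the flip completes */
        intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

        /* ... submit the flip to the hardware ... */
}

/* Called later, e.g. from the flip-done interrupt handler. */
static void example_flip_done(struct drm_device *dev,
                              unsigned frontbuffer_bits)
{
        intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
}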
index 29ec153..8b5f3aa 100644 (file)
@@ -690,7 +690,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
        u32 tmp;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1405,6 +1405,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
        mutex_lock(&dev_priv->dpio_lock);
 
+       /* allow hardware to manage TX FIFO reset source */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
        /* Deassert soft data lane reset*/
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -1441,12 +1450,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
        /* Clear calc init */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+
        /* FIXME: Program the support xxx V-dB */
        /* Use 800mV-0dB */
        for (i = 0; i < 4; i++) {
index bafd38b..803fc38 100644 (file)
@@ -1063,7 +1063,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
index a6bd142..2b50c98 100644 (file)
@@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
        u32 tmp;
 
        power_domain = intel_display_port_power_domain(encoder);
-       if (!intel_display_power_enabled(dev_priv, power_domain))
+       if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;
 
        tmp = I915_READ(lvds_encoder->reg);
index 0e018cb..e3def5a 100644 (file)
@@ -537,14 +537,13 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
        val = dev_priv->display.get_backlight(connector);
        val = intel_panel_compute_brightness(connector, val);
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 
        DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
        return val;
@@ -628,12 +627,11 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 hw_level;
-       unsigned long flags;
 
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
        WARN_ON(panel->backlight.max == 0);
 
@@ -643,7 +641,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
        if (panel->backlight.enabled)
                intel_panel_actually_set_backlight(connector, hw_level);
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 }
 
 /* set backlight brightness to level in range [0..max], assuming hw min is
@@ -657,12 +655,11 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
        u32 hw_level;
-       unsigned long flags;
 
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
        WARN_ON(panel->backlight.max == 0);
 
@@ -678,7 +675,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
        if (panel->backlight.enabled)
                intel_panel_actually_set_backlight(connector, hw_level);
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 }
 
 static void pch_disable_backlight(struct intel_connector *connector)
@@ -732,7 +729,6 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
-       unsigned long flags;
 
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
@@ -748,14 +744,14 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
                return;
        }
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
        if (panel->backlight.device)
                panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
        panel->backlight.enabled = false;
        dev_priv->display.disable_backlight(connector);
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 }
 
 static void bdw_enable_backlight(struct intel_connector *connector)
@@ -936,14 +932,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
        enum pipe pipe = intel_get_pipe_from_connector(connector);
-       unsigned long flags;
 
        if (!panel->backlight.present || pipe == INVALID_PIPE)
                return;
 
        DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
 
        WARN_ON(panel->backlight.max == 0);
 
@@ -961,7 +956,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
        if (panel->backlight.device)
                panel->backlight.device->props.power = FB_BLANK_UNBLANK;
 
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 }
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -1266,7 +1261,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_panel *panel = &intel_connector->panel;
-       unsigned long flags;
        int ret;
 
        if (!dev_priv->vbt.backlight.present) {
@@ -1279,9 +1273,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
        }
 
        /* set level and max in panel struct */
-       spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+       mutex_lock(&dev_priv->backlight_lock);
        ret = dev_priv->display.setup_backlight(intel_connector);
-       spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+       mutex_unlock(&dev_priv->backlight_lock);
 
        if (ret) {
                DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@@ -1316,7 +1310,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
                dev_priv->display.setup_backlight = bdw_setup_backlight;
                dev_priv->display.enable_backlight = bdw_enable_backlight;
                dev_priv->display.disable_backlight = pch_disable_backlight;
index c27b614..a14be5d 100644 (file)
@@ -30,9 +30,6 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
-#include <linux/vgaarb.h>
-#include <drm/i915_powerwell.h>
-#include <linux/pm_runtime.h>
 
 /**
  * RC6 is a special power stage which allows the GPU to enter an very
  * i915.i915_enable_fbc parameter
  */
 
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /*
+        * WaDisableSDEUnitClockGating:skl
+        * This seems to be a pre-production w/a.
+        */
+       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+       /*
+        * WaDisableDgMirrorFixInHalfSliceChicken5:skl
+        * This is a pre-production w/a.
+        */
+       I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
+                  I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
+                  ~GEN9_DG_MIRROR_FIX_ENABLE);
+
+       /* Wa4x4STCOptimizationDisable:skl */
+       I915_WRITE(CACHE_MODE_1,
+                  _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+}
+
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;
 
+       dev_priv->fbc.enabled = false;
+
        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -99,6 +122,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
        int i;
        u32 fbc_ctl;
 
+       dev_priv->fbc.enabled = true;
+
        cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];
@@ -153,6 +178,8 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = true;
+
        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
@@ -173,6 +200,8 @@ static void g4x_disable_fbc(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = false;
+
        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
@@ -224,6 +253,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = true;
+
        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dev_priv->fbc.threshold++;
@@ -264,6 +295,8 @@ static void ironlake_disable_fbc(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = false;
+
        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
@@ -290,6 +323,8 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
+       dev_priv->fbc.enabled = true;
+
        dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
                dev_priv->fbc.threshold++;
@@ -339,19 +374,19 @@ bool intel_fbc_enabled(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!dev_priv->display.fbc_enabled)
-               return false;
-
-       return dev_priv->display.fbc_enabled(dev);
+       return dev_priv->fbc.enabled;
 }
 
-void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (!IS_GEN8(dev))
                return;
 
+       if (!intel_fbc_enabled(dev))
+               return;
+
        I915_WRITE(MSG_FBC_REND_STATE, value);
 }
 
@@ -6041,1161 +6076,35 @@ void intel_suspend_hw(struct drm_device *dev)
                lpt_suspend_hw(dev);
 }
 
-#define for_each_power_well(i, power_well, domain_mask, power_domains) \
-       for (i = 0;                                                     \
-            i < (power_domains)->power_well_count &&                   \
-                ((power_well) = &(power_domains)->power_wells[i]);     \
-            i++)                                                       \
-               if ((power_well)->domains & (domain_mask))
-
-#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
-       for (i = (power_domains)->power_well_count - 1;                  \
-            i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
-            i--)                                                        \
-               if ((power_well)->domains & (domain_mask))
-
-/**
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       return I915_READ(HSW_PWR_WELL_DRIVER) ==
-                    (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-}
-
-bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
-                                         enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       bool is_enabled;
-       int i;
-
-       if (dev_priv->pm.suspended)
-               return false;
-
-       power_domains = &dev_priv->power_domains;
-
-       is_enabled = true;
-
-       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-               if (power_well->always_on)
-                       continue;
-
-               if (!power_well->hw_enabled) {
-                       is_enabled = false;
-                       break;
-               }
-       }
-
-       return is_enabled;
-}
-
-bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
-                                enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains;
-       bool ret;
-
-       power_domains = &dev_priv->power_domains;
-
-       mutex_lock(&power_domains->lock);
-       ret = intel_display_power_enabled_unlocked(dev_priv, domain);
-       mutex_unlock(&power_domains->lock);
-
-       return ret;
-}
-
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-
-       /*
-        * After we re-enable the power well, if we touch VGA register 0x3d5
-        * we'll get unclaimed register interrupts. This stops after we write
-        * anything to the VGA MSR register. The vgacon module uses this
-        * register all the time, so if we unbind our driver and, as a
-        * consequence, bind vgacon, we'll get stuck in an infinite loop at
-        * console_unlock(). So make here we touch the VGA MSR register, making
-        * sure vgacon can keep working normally without triggering interrupts
-        * and error messages.
-        */
-       vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-       outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-       vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-
-       if (IS_BROADWELL(dev))
-               gen8_irq_power_well_post_enable(dev_priv);
-}
-
-static void hsw_set_power_well(struct drm_i915_private *dev_priv,
-                              struct i915_power_well *power_well, bool enable)
+static void intel_init_fbc(struct drm_i915_private *dev_priv)
 {
-       bool is_enabled, enable_requested;
-       uint32_t tmp;
-
-       tmp = I915_READ(HSW_PWR_WELL_DRIVER);
-       is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
-       enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
-
-       if (enable) {
-               if (!enable_requested)
-                       I915_WRITE(HSW_PWR_WELL_DRIVER,
-                                  HSW_PWR_WELL_ENABLE_REQUEST);
-
-               if (!is_enabled) {
-                       DRM_DEBUG_KMS("Enabling power well\n");
-                       if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
-                                     HSW_PWR_WELL_STATE_ENABLED), 20))
-                               DRM_ERROR("Timeout enabling power well\n");
-               }
-
-               hsw_power_well_post_enable(dev_priv);
-       } else {
-               if (enable_requested) {
-                       I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
-                       POSTING_READ(HSW_PWR_WELL_DRIVER);
-                       DRM_DEBUG_KMS("Requesting to disable the power well\n");
-               }
-       }
-}
-
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
-
-       /*
-        * We're taking over the BIOS, so clear any requests made by it since
-        * the driver is in charge now.
-        */
-       if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
-               I915_WRITE(HSW_PWR_WELL_BIOS, 0);
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
-                                 struct i915_power_well *power_well)
-{
-       hsw_set_power_well(dev_priv, power_well, true);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       hsw_set_power_well(dev_priv, power_well, false);
-}
-
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
-                                            struct i915_power_well *power_well)
-{
-       return true;
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-                              struct i915_power_well *power_well, bool enable)
-{
-       enum punit_power_well power_well_id = power_well->data;
-       u32 mask;
-       u32 state;
-       u32 ctrl;
-
-       mask = PUNIT_PWRGT_MASK(power_well_id);
-       state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
-                        PUNIT_PWRGT_PWR_GATE(power_well_id);
-
-       mutex_lock(&dev_priv->rps.hw_lock);
-
-#define COND \
-       ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
-       if (COND)
-               goto out;
-
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
-       ctrl &= ~mask;
-       ctrl |= state;
-       vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
-       if (wait_for(COND, 100))
-               DRM_ERROR("timout setting power well state %08x (%08x)\n",
-                         state,
-                         vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
-
-out:
-       mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
-                                 struct i915_power_well *power_well)
-{
-       vlv_set_power_well(dev_priv, power_well, true);
-}
-
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       int power_well_id = power_well->data;
-       bool enabled = false;
-       u32 mask;
-       u32 state;
-       u32 ctrl;
-
-       mask = PUNIT_PWRGT_MASK(power_well_id);
-       ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
-
-       mutex_lock(&dev_priv->rps.hw_lock);
-
-       state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
-       /*
-        * We only ever set the power-on and power-gate states, anything
-        * else is unexpected.
-        */
-       WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
-               state != PUNIT_PWRGT_PWR_GATE(power_well_id));
-       if (state == ctrl)
-               enabled = true;
-
-       /*
-        * A transient state at this point would mean some unexpected party
-        * is poking at the power controls too.
-        */
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
-       WARN_ON(ctrl != state);
-
-       mutex_unlock(&dev_priv->rps.hw_lock);
-
-       return enabled;
-}
-
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
-                                         struct i915_power_well *power_well)
-{
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
-       vlv_set_power_well(dev_priv, power_well, true);
-
-       spin_lock_irq(&dev_priv->irq_lock);
-       valleyview_enable_display_irqs(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       /*
-        * During driver initialization/resume we can avoid restoring the
-        * part of the HW/SW state that will be inited anyway explicitly.
-        */
-       if (dev_priv->power_domains.initializing)
+       if (!HAS_FBC(dev_priv)) {
+               dev_priv->fbc.enabled = false;
                return;
-
-       intel_hpd_init(dev_priv->dev);
-
-       i915_redisable_vga_power_on(dev_priv->dev);
-}
-
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
-       spin_lock_irq(&dev_priv->irq_lock);
-       valleyview_disable_display_irqs(dev_priv);
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       vlv_set_power_well(dev_priv, power_well, false);
-
-       vlv_power_sequencer_reset(dev_priv);
-}
-
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
-       /*
-        * Enable the CRI clock source so we can get at the
-        * display and the reference clock for VGA
-        * hotplug / manual detection.
-        */
-       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                  DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
-       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
-       vlv_set_power_well(dev_priv, power_well, true);
-
-       /*
-        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
-        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
-        *   b. The other bits such as sfr settings / modesel may all
-        *      be set to 0.
-        *
-        * This should only be done on init and resume from S3 with
-        * both PLLs disabled, or we risk losing DPIO and PLL
-        * synchronization.
-        */
-       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-                                           struct i915_power_well *power_well)
-{
-       enum pipe pipe;
-
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
-       for_each_pipe(dev_priv, pipe)
-               assert_pll_disabled(dev_priv, pipe);
-
-       /* Assert common reset */
-       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
-
-       vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-                                          struct i915_power_well *power_well)
-{
-       enum dpio_phy phy;
-
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
-                    power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
-
-       /*
-        * Enable the CRI clock source so we can get at the
-        * display and the reference clock for VGA
-        * hotplug / manual detection.
-        */
-       if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-               phy = DPIO_PHY0;
-               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                          DPLL_REFA_CLK_ENABLE_VLV);
-               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
-       } else {
-               phy = DPIO_PHY1;
-               I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
-                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
        }
-       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-       vlv_set_power_well(dev_priv, power_well, true);
-
-       /* Poll for phypwrgood signal */
-       if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
-               DRM_ERROR("Display PHY %d is not power up\n", phy);
-
-       I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
-                  PHY_COM_LANE_RESET_DEASSERT(phy));
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-                                           struct i915_power_well *power_well)
-{
-       enum dpio_phy phy;
-
-       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
-                    power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
 
-       if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-               phy = DPIO_PHY0;
-               assert_pll_disabled(dev_priv, PIPE_A);
-               assert_pll_disabled(dev_priv, PIPE_B);
+       if (INTEL_INFO(dev_priv)->gen >= 7) {
+               dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+               dev_priv->display.enable_fbc = gen7_enable_fbc;
+               dev_priv->display.disable_fbc = ironlake_disable_fbc;
+       } else if (INTEL_INFO(dev_priv)->gen >= 5) {
+               dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+               dev_priv->display.enable_fbc = ironlake_enable_fbc;
+               dev_priv->display.disable_fbc = ironlake_disable_fbc;
+       } else if (IS_GM45(dev_priv)) {
+               dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+               dev_priv->display.enable_fbc = g4x_enable_fbc;
+               dev_priv->display.disable_fbc = g4x_disable_fbc;
        } else {
-               phy = DPIO_PHY1;
-               assert_pll_disabled(dev_priv, PIPE_C);
-       }
-
-       I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
-                  ~PHY_COM_LANE_RESET_DEASSERT(phy));
-
-       vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
-{
-       enum pipe pipe = power_well->data;
-       bool enabled;
-       u32 state, ctrl;
-
-       mutex_lock(&dev_priv->rps.hw_lock);
-
-       state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
-       /*
-        * We only ever set the power-on and power-gate states, anything
-        * else is unexpected.
-        */
-       WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
-       enabled = state == DP_SSS_PWR_ON(pipe);
-
-       /*
-        * A transient state at this point would mean some unexpected party
-        * is poking at the power controls too.
-        */
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
-       WARN_ON(ctrl << 16 != state);
-
-       mutex_unlock(&dev_priv->rps.hw_lock);
-
-       return enabled;
-}
-
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
-                                   struct i915_power_well *power_well,
-                                   bool enable)
-{
-       enum pipe pipe = power_well->data;
-       u32 state;
-       u32 ctrl;
-
-       state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-
-       mutex_lock(&dev_priv->rps.hw_lock);
+               dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+               dev_priv->display.enable_fbc = i8xx_enable_fbc;
+               dev_priv->display.disable_fbc = i8xx_disable_fbc;
 
-#define COND \
-       ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
-
-       if (COND)
-               goto out;
-
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
-       ctrl &= ~DP_SSC_MASK(pipe);
-       ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
-       vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
-
-       if (wait_for(COND, 100))
-               DRM_ERROR("timout setting power well state %08x (%08x)\n",
-                         state,
-                         vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
-
-#undef COND
-
-out:
-       mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
-{
-       chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
-                                      struct i915_power_well *power_well)
-{
-       WARN_ON_ONCE(power_well->data != PIPE_A &&
-                    power_well->data != PIPE_B &&
-                    power_well->data != PIPE_C);
-
-       chv_set_pipe_power_well(dev_priv, power_well, true);
-}
-
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
-                                       struct i915_power_well *power_well)
-{
-       WARN_ON_ONCE(power_well->data != PIPE_A &&
-                    power_well->data != PIPE_B &&
-                    power_well->data != PIPE_C);
-
-       chv_set_pipe_power_well(dev_priv, power_well, false);
-}
-
-static void check_power_well_state(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
-       if (power_well->always_on || !i915.disable_power_well) {
-               if (!enabled)
-                       goto mismatch;
-
-               return;
-       }
-
-       if (enabled != (power_well->count > 0))
-               goto mismatch;
-
-       return;
-
-mismatch:
-       WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
-                 power_well->name, power_well->always_on, enabled,
-                 power_well->count, i915.disable_power_well);
-}
-
-void intel_display_power_get(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       int i;
-
-       intel_runtime_pm_get(dev_priv);
-
-       power_domains = &dev_priv->power_domains;
-
-       mutex_lock(&power_domains->lock);
-
-       for_each_power_well(i, power_well, BIT(domain), power_domains) {
-               if (!power_well->count++) {
-                       DRM_DEBUG_KMS("enabling %s\n", power_well->name);
-                       power_well->ops->enable(dev_priv, power_well);
-                       power_well->hw_enabled = true;
-               }
-
-               check_power_well_state(dev_priv, power_well);
-       }
-
-       power_domains->domain_use_count[domain]++;
-
-       mutex_unlock(&power_domains->lock);
-}
-
-void intel_display_power_put(struct drm_i915_private *dev_priv,
-                            enum intel_display_power_domain domain)
-{
-       struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       int i;
-
-       power_domains = &dev_priv->power_domains;
-
-       mutex_lock(&power_domains->lock);
-
-       WARN_ON(!power_domains->domain_use_count[domain]);
-       power_domains->domain_use_count[domain]--;
-
-       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-               WARN_ON(!power_well->count);
-
-               if (!--power_well->count && i915.disable_power_well) {
-                       DRM_DEBUG_KMS("disabling %s\n", power_well->name);
-                       power_well->hw_enabled = false;
-                       power_well->ops->disable(dev_priv, power_well);
-               }
-
-               check_power_well_state(dev_priv, power_well);
-       }
-
-       mutex_unlock(&power_domains->lock);
-
-       intel_runtime_pm_put(dev_priv);
-}
-
-static struct i915_power_domains *hsw_pwr;
-
-/* Display audio driver power well request */
-int i915_request_power_well(void)
-{
-       struct drm_i915_private *dev_priv;
-
-       if (!hsw_pwr)
-               return -ENODEV;
-
-       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
-                               power_domains);
-       intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(i915_request_power_well);
-
-/* Display audio driver power well release */
-int i915_release_power_well(void)
-{
-       struct drm_i915_private *dev_priv;
-
-       if (!hsw_pwr)
-               return -ENODEV;
-
-       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
-                               power_domains);
-       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(i915_release_power_well);
-
-/*
- * Private interface for the audio driver to get CDCLK in kHz.
- *
- * Caller must request power well using i915_request_power_well() prior to
- * making the call.
- */
-int i915_get_cdclk_freq(void)
-{
-       struct drm_i915_private *dev_priv;
-
-       if (!hsw_pwr)
-               return -ENODEV;
-
-       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
-                               power_domains);
-
-       return intel_ddi_get_cdclk_freq(dev_priv);
-}
-EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
-
-
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
-#define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
-       BIT(POWER_DOMAIN_PIPE_A) |                      \
-       BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
-       BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
-       BIT(POWER_DOMAIN_PORT_CRT) |                    \
-       BIT(POWER_DOMAIN_PLLS) |                        \
-       BIT(POWER_DOMAIN_INIT))
-#define HSW_DISPLAY_POWER_DOMAINS (                            \
-       (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
-       BIT(POWER_DOMAIN_INIT))
-
-#define BDW_ALWAYS_ON_POWER_DOMAINS (                  \
-       HSW_ALWAYS_ON_POWER_DOMAINS |                   \
-       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-#define BDW_DISPLAY_POWER_DOMAINS (                            \
-       (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_ALWAYS_ON_POWER_DOMAINS    BIT(POWER_DOMAIN_INIT)
-#define VLV_DISPLAY_POWER_DOMAINS      POWER_DOMAIN_MASK
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS (                \
-       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_CRT) |            \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_A_POWER_DOMAINS (     \
-       BIT(POWER_DOMAIN_PIPE_A) |      \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_B_POWER_DOMAINS (     \
-       BIT(POWER_DOMAIN_PIPE_B) |      \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_C_POWER_DOMAINS (     \
-       BIT(POWER_DOMAIN_PIPE_C) |      \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS (                \
-       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS (         \
-       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
-       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
-       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
-       BIT(POWER_DOMAIN_INIT))
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
-       .sync_hw = i9xx_always_on_power_well_noop,
-       .enable = i9xx_always_on_power_well_noop,
-       .disable = i9xx_always_on_power_well_noop,
-       .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
-       .sync_hw = chv_pipe_power_well_sync_hw,
-       .enable = chv_pipe_power_well_enable,
-       .disable = chv_pipe_power_well_disable,
-       .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
-       .sync_hw = vlv_power_well_sync_hw,
-       .enable = chv_dpio_cmn_power_well_enable,
-       .disable = chv_dpio_cmn_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
-
-static struct i915_power_well i9xx_always_on_power_well[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
-       .sync_hw = hsw_power_well_sync_hw,
-       .enable = hsw_power_well_enable,
-       .disable = hsw_power_well_disable,
-       .is_enabled = hsw_power_well_enabled,
-};
-
-static struct i915_power_well hsw_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-       {
-               .name = "display",
-               .domains = HSW_DISPLAY_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-       },
-};
-
-static struct i915_power_well bdw_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-       {
-               .name = "display",
-               .domains = BDW_DISPLAY_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-       },
-};
-
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
-       .sync_hw = vlv_power_well_sync_hw,
-       .enable = vlv_display_power_well_enable,
-       .disable = vlv_display_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
-       .sync_hw = vlv_power_well_sync_hw,
-       .enable = vlv_dpio_cmn_power_well_enable,
-       .disable = vlv_dpio_cmn_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
-       .sync_hw = vlv_power_well_sync_hw,
-       .enable = vlv_power_well_enable,
-       .disable = vlv_power_well_disable,
-       .is_enabled = vlv_power_well_enabled,
-};
-
-static struct i915_power_well vlv_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-       {
-               .name = "display",
-               .domains = VLV_DISPLAY_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DISP2D,
-               .ops = &vlv_display_power_well_ops,
-       },
-       {
-               .name = "dpio-tx-b-01",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
-       },
-       {
-               .name = "dpio-tx-b-23",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
-       },
-       {
-               .name = "dpio-tx-c-01",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
-       },
-       {
-               .name = "dpio-tx-c-23",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
-       },
-       {
-               .name = "dpio-common",
-               .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-               .ops = &vlv_dpio_cmn_power_well_ops,
-       },
-};
-
-static struct i915_power_well chv_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = 1,
-               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
-               .ops = &i9xx_always_on_power_well_ops,
-       },
-#if 0
-       {
-               .name = "display",
-               .domains = VLV_DISPLAY_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DISP2D,
-               .ops = &vlv_display_power_well_ops,
-       },
-       {
-               .name = "pipe-a",
-               .domains = CHV_PIPE_A_POWER_DOMAINS,
-               .data = PIPE_A,
-               .ops = &chv_pipe_power_well_ops,
-       },
-       {
-               .name = "pipe-b",
-               .domains = CHV_PIPE_B_POWER_DOMAINS,
-               .data = PIPE_B,
-               .ops = &chv_pipe_power_well_ops,
-       },
-       {
-               .name = "pipe-c",
-               .domains = CHV_PIPE_C_POWER_DOMAINS,
-               .data = PIPE_C,
-               .ops = &chv_pipe_power_well_ops,
-       },
-#endif
-       {
-               .name = "dpio-common-bc",
-               /*
-                * XXX: cmnreset for one PHY seems to disturb the other.
-                * As a workaround keep both powered on at the same
-                * time for now.
-                */
-               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-               .ops = &chv_dpio_cmn_power_well_ops,
-       },
-       {
-               .name = "dpio-common-d",
-               /*
-                * XXX: cmnreset for one PHY seems to disturb the other.
-                * As a workaround keep both powered on at the same
-                * time for now.
-                */
-               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DPIO_CMN_D,
-               .ops = &chv_dpio_cmn_power_well_ops,
-       },
-#if 0
-       {
-               .name = "dpio-tx-b-01",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
-       },
-       {
-               .name = "dpio-tx-b-23",
-               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
-       },
-       {
-               .name = "dpio-tx-c-01",
-               .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
-       },
-       {
-               .name = "dpio-tx-c-23",
-               .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
-                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
-       },
-       {
-               .name = "dpio-tx-d-01",
-               .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
-                          CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
-       },
-       {
-               .name = "dpio-tx-d-23",
-               .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
-                          CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
-               .ops = &vlv_dpio_power_well_ops,
-               .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
-       },
-#endif
-};
-
-static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
-                                                enum punit_power_well power_well_id)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
-       int i;
-
-       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
-               if (power_well->data == power_well_id)
-                       return power_well;
+               /* This value was pulled out of someone's hat */
+               I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
        }
 
-       return NULL;
-}
-
-#define set_power_wells(power_domains, __power_wells) ({               \
-       (power_domains)->power_wells = (__power_wells);                 \
-       (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
-})
-
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
-       mutex_init(&power_domains->lock);
-
-       /*
-        * The enabling order will be from lower to higher indexed wells,
-        * the disabling order is reversed.
-        */
-       if (IS_HASWELL(dev_priv->dev)) {
-               set_power_wells(power_domains, hsw_power_wells);
-               hsw_pwr = power_domains;
-       } else if (IS_BROADWELL(dev_priv->dev)) {
-               set_power_wells(power_domains, bdw_power_wells);
-               hsw_pwr = power_domains;
-       } else if (IS_CHERRYVIEW(dev_priv->dev)) {
-               set_power_wells(power_domains, chv_power_wells);
-       } else if (IS_VALLEYVIEW(dev_priv->dev)) {
-               set_power_wells(power_domains, vlv_power_wells);
-       } else {
-               set_power_wells(power_domains, i9xx_always_on_power_well);
-       }
-
-       return 0;
-}
-
-void intel_power_domains_remove(struct drm_i915_private *dev_priv)
-{
-       hsw_pwr = NULL;
-}
-
-static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
-       int i;
-
-       mutex_lock(&power_domains->lock);
-       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
-               power_well->ops->sync_hw(dev_priv, power_well);
-               power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
-                                                                    power_well);
-       }
-       mutex_unlock(&power_domains->lock);
-}
-
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_well *cmn =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
-       struct i915_power_well *disp2d =
-               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
-
-       /* nothing to do if common lane is already off */
-       if (!cmn->ops->is_enabled(dev_priv, cmn))
-               return;
-
-       /* If the display might be already active skip this */
-       if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
-           I915_READ(DPIO_CTL) & DPIO_CMNRST)
-               return;
-
-       DRM_DEBUG_KMS("toggling display PHY side reset\n");
-
-       /* cmnlane needs DPLL registers */
-       disp2d->ops->enable(dev_priv, disp2d);
-
-       /*
-        * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
-        * Need to assert and de-assert PHY SB reset by gating the
-        * common lane power, then un-gating it.
-        * Simply ungating isn't enough to reset the PHY enough to get
-        * ports and lanes running.
-        */
-       cmn->ops->disable(dev_priv, cmn);
-}
-
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
-       power_domains->initializing = true;
-
-       if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
-               mutex_lock(&power_domains->lock);
-               vlv_cmnlane_wa(dev_priv);
-               mutex_unlock(&power_domains->lock);
-       }
-
-       /* For now, we need the power well to be always enabled. */
-       intel_display_set_init_power(dev_priv, true);
-       intel_power_domains_resume(dev_priv);
-       power_domains->initializing = false;
-}
-
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
-{
-       intel_runtime_pm_get(dev_priv);
-}
-
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
-       intel_runtime_pm_put(dev_priv);
-}
-
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
-
-       if (!HAS_RUNTIME_PM(dev))
-               return;
-
-       pm_runtime_get_sync(device);
-       WARN(dev_priv->pm.suspended, "Device still suspended.\n");
-}
-
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
-
-       if (!HAS_RUNTIME_PM(dev))
-               return;
-
-       WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
-       pm_runtime_get_noresume(device);
-}
-
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
-
-       if (!HAS_RUNTIME_PM(dev))
-               return;
-
-       pm_runtime_mark_last_busy(device);
-       pm_runtime_put_autosuspend(device);
-}
-
-void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
-
-       if (!HAS_RUNTIME_PM(dev))
-               return;
-
-       pm_runtime_set_active(device);
-
-       /*
-        * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
-        * requirement.
-        */
-       if (!intel_enable_rc6(dev)) {
-               DRM_INFO("RC6 disabled, disabling runtime PM support\n");
-               return;
-       }
-
-       pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
-       pm_runtime_mark_last_busy(device);
-       pm_runtime_use_autosuspend(device);
-
-       pm_runtime_put_autosuspend(device);
-}
-
-void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct device *device = &dev->pdev->dev;
-
-       if (!HAS_RUNTIME_PM(dev))
-               return;
-
-       if (!intel_enable_rc6(dev))
-               return;
-
-       /* Make sure we're not suspended first. */
-       pm_runtime_get_sync(device);
-       pm_runtime_disable(device);
+       dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
 }
 
 /* Set up chip specific power management-related functions */
@@ -7203,28 +6112,7 @@ void intel_init_pm(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (HAS_FBC(dev)) {
-               if (INTEL_INFO(dev)->gen >= 7) {
-                       dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-                       dev_priv->display.enable_fbc = gen7_enable_fbc;
-                       dev_priv->display.disable_fbc = ironlake_disable_fbc;
-               } else if (INTEL_INFO(dev)->gen >= 5) {
-                       dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-                       dev_priv->display.enable_fbc = ironlake_enable_fbc;
-                       dev_priv->display.disable_fbc = ironlake_disable_fbc;
-               } else if (IS_GM45(dev)) {
-                       dev_priv->display.fbc_enabled = g4x_fbc_enabled;
-                       dev_priv->display.enable_fbc = g4x_enable_fbc;
-                       dev_priv->display.disable_fbc = g4x_disable_fbc;
-               } else {
-                       dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-                       dev_priv->display.enable_fbc = i8xx_enable_fbc;
-                       dev_priv->display.disable_fbc = i8xx_disable_fbc;
-
-                       /* This value was pulled out of someone's hat */
-                       I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
-               }
-       }
+       intel_init_fbc(dev_priv);
 
        /* For cxsr */
        if (IS_PINEVIEW(dev))
@@ -7233,7 +6121,9 @@ void intel_init_pm(struct drm_device *dev)
                i915_ironlake_get_mem_freq(dev);
 
        /* For FIFO watermark updates */
-       if (HAS_PCH_SPLIT(dev)) {
+       if (IS_GEN9(dev)) {
+               dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+       } else if (HAS_PCH_SPLIT(dev)) {
                ilk_setup_wm_latency(dev);
 
                if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
@@ -7490,5 +6380,4 @@ void intel_pm_setup(struct drm_device *dev)
                          intel_gen6_powersave_work);
 
        dev_priv->pm.suspended = false;
-       dev_priv->pm._irqs_disabled = false;
 }
index 0a80e41..816a692 100644 (file)
@@ -729,8 +729,12 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
          * workaround for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
+       /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
        intel_ring_emit_wa(ring, HDC_CHICKEN0,
-                          _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
+                          _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT |
+                                             (IS_BDW_GT3(dev) ?
+                                              HDC_FENCE_DEST_SLM_DISABLE : 0)
+                                  ));
 
        /* Wa4x4STCOptimizationDisable:bdw */
        intel_ring_emit_wa(ring, CACHE_MODE_1,
@@ -812,7 +816,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
         */
-       if (INTEL_INFO(dev)->gen >= 6)
+       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
        /* Required for the hardware to program scanline values for waiting */
@@ -1186,7 +1190,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1217,7 +1221,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (!intel_irqs_enabled(dev_priv))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1254,7 +1258,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (!intel_irqs_enabled(dev_priv))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1388,8 +1392,8 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
-              return false;
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+               return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
@@ -1431,7 +1435,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1451,9 +1455,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
-               return;
-
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                I915_WRITE_IMR(ring, ~0);
@@ -1469,7 +1470,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
-       if (!dev->irq_enabled)
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -2229,6 +2230,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
                           u32 invalidate, u32 flush)
 {
        struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t cmd;
        int ret;
 
@@ -2259,8 +2261,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
        }
        intel_ring_advance(ring);
 
-       if (IS_GEN7(dev) && !invalidate && flush)
-               return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+       if (!invalidate && flush) {
+               if (IS_GEN7(dev))
+                       return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+               else if (IS_BROADWELL(dev))
+                       dev_priv->fbc.need_sw_cache_clean = true;
+       }
 
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
new file mode 100644 (file)
index 0000000..36749b9
--- /dev/null
@@ -0,0 +1,1375 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *    Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/i915_powerwell.h>
+
+/**
+ * DOC: runtime pm
+ *
+ * The i915 driver supports dynamic enabling and disabling of entire hardware
+ * blocks at runtime. This is especially important on the display side where
+ * software is supposed to control many power gates manually on recent hardware,
+ * since on the GT side a lot of the power management is done by the hardware.
+ * But even there some manual control at the device level is required.
+ *
+ * Since i915 supports a diverse set of platforms with a unified codebase and
+ * hardware engineers just love to shuffle functionality around between power
+ * domains there's a sizeable amount of indirection required. This file provides
+ * generic functions to the driver for grabbing and releasing references for
+ * abstract power domains. It then maps those to the actual power wells
+ * present for a given platform.
+ */
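A minimal usage sketch (not part of this patch), assuming only the interfaces
handled by this commit; the helper name example_poke_gt is hypothetical. Code
that runs outside of a display power domain is expected to bracket its hardware
access with a device-level runtime pm reference:

	static void example_poke_gt(struct drm_i915_private *dev_priv)
	{
		intel_runtime_pm_get(dev_priv);	/* device is awake from here on */

		/* ... GT register access goes here ... */

		intel_runtime_pm_put(dev_priv);	/* may autosuspend again */
	}

Display code normally uses intel_display_power_get()/intel_display_power_put()
instead, which grab and drop the runtime pm reference underneath.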
+
+static struct i915_power_domains *hsw_pwr;
+
+#define for_each_power_well(i, power_well, domain_mask, power_domains) \
+       for (i = 0;                                                     \
+            i < (power_domains)->power_well_count &&                   \
+                ((power_well) = &(power_domains)->power_wells[i]);     \
+            i++)                                                       \
+               if ((power_well)->domains & (domain_mask))
+
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+       for (i = (power_domains)->power_well_count - 1;                  \
+            i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+            i--)                                                        \
+               if ((power_well)->domains & (domain_mask))
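These iterators expand to a for loop with a trailing if that filters on the
domain mask, so a hedged fragment (not part of this patch, with declarations of
i, power_well, domain and power_domains assumed) looks like:

	/* walk every well that can feed the given domain, lowest index first */
	for_each_power_well(i, power_well, BIT(domain), power_domains)
		DRM_DEBUG_KMS("%s backs this domain\n", power_well->name);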
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       return I915_READ(HSW_PWR_WELL_DRIVER) ==
+                    (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                     enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       bool is_enabled;
+       int i;
+
+       if (dev_priv->pm.suspended)
+               return false;
+
+       power_domains = &dev_priv->power_domains;
+
+       is_enabled = true;
+
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+               if (power_well->always_on)
+                       continue;
+
+               if (!power_well->hw_enabled) {
+                       is_enabled = false;
+                       break;
+               }
+       }
+
+       return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+                                   enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       bool ret;
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+       ret = __intel_display_power_is_enabled(dev_priv, domain);
+       mutex_unlock(&power_domains->lock);
+
+       return ret;
+}
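A short state-readout sketch (not part of this patch); example_pipe_a_active is
a hypothetical helper and the caller is assumed to hold the relevant modeset
locks as required above:

	static bool example_pipe_a_active(struct drm_i915_private *dev_priv)
	{
		/* Bail out if the wells feeding pipe A are powered down. */
		if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
			return false;

		/* ... safe to read the pipe A registers and decode the state ... */
		return true;
	}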
+
+/**
+ * intel_display_set_init_power - set the initial power domain state
+ * @dev_priv: i915 device instance
+ * @enable: whether to enable or disable the initial power domain state
+ *
+ * For simplicity our driver load/unload and system suspend/resume code assumes
+ * that all power domains are always enabled. This function controls the state
+ * of this little hack. While the initial power domain state is enabled, runtime
+ * pm is effectively disabled.
+ */
+void intel_display_set_init_power(struct drm_i915_private *dev_priv,
+                                 bool enable)
+{
+       if (dev_priv->power_domains.init_power_on == enable)
+               return;
+
+       if (enable)
+               intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+       else
+               intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+       dev_priv->power_domains.init_power_on = enable;
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+
+       /*
+        * After we re-enable the power well, if we touch VGA register 0x3d5
+        * we'll get unclaimed register interrupts. This stops after we write
+        * anything to the VGA MSR register. The vgacon module uses this
+        * register all the time, so if we unbind our driver and, as a
+        * consequence, bind vgacon, we'll get stuck in an infinite loop at
+        * console_unlock(). So here we touch the VGA MSR register, making
+        * sure vgacon can keep working normally without triggering interrupts
+        * and error messages.
+        */
+       vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+       outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+       vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+
+       if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
+               gen8_irq_power_well_post_enable(dev_priv);
+}
+
+static void hsw_set_power_well(struct drm_i915_private *dev_priv,
+                              struct i915_power_well *power_well, bool enable)
+{
+       bool is_enabled, enable_requested;
+       uint32_t tmp;
+
+       tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+       is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+       enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
+
+       if (enable) {
+               if (!enable_requested)
+                       I915_WRITE(HSW_PWR_WELL_DRIVER,
+                                  HSW_PWR_WELL_ENABLE_REQUEST);
+
+               if (!is_enabled) {
+                       DRM_DEBUG_KMS("Enabling power well\n");
+                       if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+                                     HSW_PWR_WELL_STATE_ENABLED), 20))
+                               DRM_ERROR("Timeout enabling power well\n");
+               }
+
+               hsw_power_well_post_enable(dev_priv);
+       } else {
+               if (enable_requested) {
+                       I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+                       POSTING_READ(HSW_PWR_WELL_DRIVER);
+                       DRM_DEBUG_KMS("Requesting to disable the power well\n");
+               }
+       }
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+       /*
+        * We're taking over the BIOS, so clear any requests made by it since
+        * the driver is in charge now.
+        */
+       if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+               I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+                                 struct i915_power_well *power_well)
+{
+       hsw_set_power_well(dev_priv, power_well, true);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       hsw_set_power_well(dev_priv, power_well, false);
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+                                            struct i915_power_well *power_well)
+{
+       return true;
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+                              struct i915_power_well *power_well, bool enable)
+{
+       enum punit_power_well power_well_id = power_well->data;
+       u32 mask;
+       u32 state;
+       u32 ctrl;
+
+       mask = PUNIT_PWRGT_MASK(power_well_id);
+       state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
+                        PUNIT_PWRGT_PWR_GATE(power_well_id);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+       ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+       if (COND)
+               goto out;
+
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+       ctrl &= ~mask;
+       ctrl |= state;
+       vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+       if (wait_for(COND, 100))
+               DRM_ERROR("timout setting power well state %08x (%08x)\n",
+                         state,
+                         vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+                                 struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       int power_well_id = power_well->data;
+       bool enabled = false;
+       u32 mask;
+       u32 state;
+       u32 ctrl;
+
+       mask = PUNIT_PWRGT_MASK(power_well_id);
+       ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+       /*
+        * We only ever set the power-on and power-gate states, anything
+        * else is unexpected.
+        */
+       WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
+               state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+       if (state == ctrl)
+               enabled = true;
+
+       /*
+        * A transient state at this point would mean some unexpected party
+        * is poking at the power controls too.
+        */
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+       WARN_ON(ctrl != state);
+
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       return enabled;
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+                                         struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       valleyview_enable_display_irqs(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       /*
+        * During driver initialization/resume we can avoid restoring the
+        * part of the HW/SW state that will be inited anyway explicitly.
+        */
+       if (dev_priv->power_domains.initializing)
+               return;
+
+       intel_hpd_init(dev_priv);
+
+       i915_redisable_vga_power_on(dev_priv->dev);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       valleyview_disable_display_irqs(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       vlv_set_power_well(dev_priv, power_well, false);
+
+       vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+       /*
+        * Enable the CRI clock source so we can get at the
+        * display and the reference clock for VGA
+        * hotplug / manual detection.
+        */
+       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                  DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       /*
+        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
+        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
+        *   b. The other bits such as sfr settings / modesel may all
+        *      be set to 0.
+        *
+        * This should only be done on init and resume from S3 with
+        * both PLLs disabled, or we risk losing DPIO and PLL
+        * synchronization.
+        */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       enum pipe pipe;
+
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+       for_each_pipe(dev_priv, pipe)
+               assert_pll_disabled(dev_priv, pipe);
+
+       /* Assert common reset */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       enum dpio_phy phy;
+
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+                    power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+       /*
+        * Enable the CRI clock source so we can get at the
+        * display and the reference clock for VGA
+        * hotplug / manual detection.
+        */
+       if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+               phy = DPIO_PHY0;
+               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                          DPLL_REFA_CLK_ENABLE_VLV);
+               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       } else {
+               phy = DPIO_PHY1;
+               I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
+                          DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       }
+       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       /* Poll for phypwrgood signal */
+       if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+               DRM_ERROR("Display PHY %d is not power up\n", phy);
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
+                  PHY_COM_LANE_RESET_DEASSERT(phy));
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       enum dpio_phy phy;
+
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
+                    power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
+
+       if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+               phy = DPIO_PHY0;
+               assert_pll_disabled(dev_priv, PIPE_A);
+               assert_pll_disabled(dev_priv, PIPE_B);
+       } else {
+               phy = DPIO_PHY1;
+               assert_pll_disabled(dev_priv, PIPE_C);
+       }
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
+                  ~PHY_COM_LANE_RESET_DEASSERT(phy));
+
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+                                       struct i915_power_well *power_well)
+{
+       enum pipe pipe = power_well->data;
+       bool enabled;
+       u32 state, ctrl;
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+       /*
+        * We only ever set the power-on and power-gate states, anything
+        * else is unexpected.
+        */
+       WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+       enabled = state == DP_SSS_PWR_ON(pipe);
+
+       /*
+        * A transient state at this point would mean some unexpected party
+        * is poking at the power controls too.
+        */
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+       WARN_ON(ctrl << 16 != state);
+
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+                                   struct i915_power_well *power_well,
+                                   bool enable)
+{
+       enum pipe pipe = power_well->data;
+       u32 state;
+       u32 ctrl;
+
+       state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+#define COND \
+       ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+
+       if (COND)
+               goto out;
+
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+       ctrl &= ~DP_SSC_MASK(pipe);
+       ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+       vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+
+       if (wait_for(COND, 100))
+               DRM_ERROR("timout setting power well state %08x (%08x)\n",
+                         state,
+                         vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+
+#undef COND
+
+out:
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                       struct i915_power_well *power_well)
+{
+       chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+                                      struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PIPE_A &&
+                    power_well->data != PIPE_B &&
+                    power_well->data != PIPE_C);
+
+       chv_set_pipe_power_well(dev_priv, power_well, true);
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+                                       struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PIPE_A &&
+                    power_well->data != PIPE_B &&
+                    power_well->data != PIPE_C);
+
+       chv_set_pipe_power_well(dev_priv, power_well, false);
+}
+
+static void check_power_well_state(struct drm_i915_private *dev_priv,
+                                  struct i915_power_well *power_well)
+{
+       bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
+
+       if (power_well->always_on || !i915.disable_power_well) {
+               if (!enabled)
+                       goto mismatch;
+
+               return;
+       }
+
+       if (enabled != (power_well->count > 0))
+               goto mismatch;
+
+       return;
+
+mismatch:
+       WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
+                 power_well->name, power_well->always_on, enabled,
+                 power_well->count, i915.disable_power_well);
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+void intel_display_power_get(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       intel_runtime_pm_get(dev_priv);
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+
+       for_each_power_well(i, power_well, BIT(domain), power_domains) {
+               if (!power_well->count++) {
+                       DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+                       power_well->ops->enable(dev_priv, power_well);
+                       power_well->hw_enabled = true;
+               }
+
+               check_power_well_state(dev_priv, power_well);
+       }
+
+       power_domains->domain_use_count[domain]++;
+
+       mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+                            enum intel_display_power_domain domain)
+{
+       struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+
+       WARN_ON(!power_domains->domain_use_count[domain]);
+       power_domains->domain_use_count[domain]--;
+
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+               WARN_ON(!power_well->count);
+
+               if (!--power_well->count && i915.disable_power_well) {
+                       DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+                       power_well->hw_enabled = false;
+                       power_well->ops->disable(dev_priv, power_well);
+               }
+
+               check_power_well_state(dev_priv, power_well);
+       }
+
+       mutex_unlock(&power_domains->lock);
+
+       intel_runtime_pm_put(dev_priv);
+}
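The expected calling pattern, as a hedged sketch (not part of this patch); the
helper name example_touch_pipe_a is hypothetical, the point is the symmetric
get/put around the hardware access:

	static void example_touch_pipe_a(struct drm_i915_private *dev_priv)
	{
		intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

		/* pipe A and every power well backing it are enabled here */

		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
	}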
+
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
+       BIT(POWER_DOMAIN_PIPE_A) |                      \
+       BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
+       BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
+       BIT(POWER_DOMAIN_PORT_CRT) |                    \
+       BIT(POWER_DOMAIN_PLLS) |                        \
+       BIT(POWER_DOMAIN_INIT))
+#define HSW_DISPLAY_POWER_DOMAINS (                            \
+       (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
+       BIT(POWER_DOMAIN_INIT))
+
+#define BDW_ALWAYS_ON_POWER_DOMAINS (                  \
+       HSW_ALWAYS_ON_POWER_DOMAINS |                   \
+       BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
+#define BDW_DISPLAY_POWER_DOMAINS (                            \
+       (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_ALWAYS_ON_POWER_DOMAINS    BIT(POWER_DOMAIN_INIT)
+#define VLV_DISPLAY_POWER_DOMAINS      POWER_DOMAIN_MASK
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS (                \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_CRT) |            \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_A_POWER_DOMAINS (     \
+       BIT(POWER_DOMAIN_PIPE_A) |      \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_B_POWER_DOMAINS (     \
+       BIT(POWER_DOMAIN_PIPE_B) |      \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_PIPE_C_POWER_DOMAINS (     \
+       BIT(POWER_DOMAIN_PIPE_C) |      \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS (                \
+       BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS (         \
+       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |  \
+       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
+       BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |  \
+       BIT(POWER_DOMAIN_INIT))
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+       .sync_hw = i9xx_always_on_power_well_noop,
+       .enable = i9xx_always_on_power_well_noop,
+       .disable = i9xx_always_on_power_well_noop,
+       .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+       .sync_hw = chv_pipe_power_well_sync_hw,
+       .enable = chv_pipe_power_well_enable,
+       .disable = chv_pipe_power_well_disable,
+       .is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = chv_dpio_cmn_power_well_enable,
+       .disable = chv_dpio_cmn_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well i9xx_always_on_power_well[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+       .sync_hw = hsw_power_well_sync_hw,
+       .enable = hsw_power_well_enable,
+       .disable = hsw_power_well_disable,
+       .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+       {
+               .name = "display",
+               .domains = HSW_DISPLAY_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+       },
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+       {
+               .name = "display",
+               .domains = BDW_DISPLAY_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+       },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_display_power_well_enable,
+       .disable = vlv_display_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_dpio_cmn_power_well_enable,
+       .disable = vlv_dpio_cmn_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_power_well_enable,
+       .disable = vlv_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
+static struct i915_power_well vlv_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+       {
+               .name = "display",
+               .domains = VLV_DISPLAY_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DISP2D,
+               .ops = &vlv_display_power_well_ops,
+       },
+       {
+               .name = "dpio-tx-b-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+       },
+       {
+               .name = "dpio-tx-b-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+       },
+       {
+               .name = "dpio-tx-c-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+       },
+       {
+               .name = "dpio-tx-c-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+       },
+       {
+               .name = "dpio-common",
+               .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+               .ops = &vlv_dpio_cmn_power_well_ops,
+       },
+};
+
+static struct i915_power_well chv_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+               .ops = &i9xx_always_on_power_well_ops,
+       },
+#if 0
+       {
+               .name = "display",
+               .domains = VLV_DISPLAY_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DISP2D,
+               .ops = &vlv_display_power_well_ops,
+       },
+       {
+               .name = "pipe-a",
+               .domains = CHV_PIPE_A_POWER_DOMAINS,
+               .data = PIPE_A,
+               .ops = &chv_pipe_power_well_ops,
+       },
+       {
+               .name = "pipe-b",
+               .domains = CHV_PIPE_B_POWER_DOMAINS,
+               .data = PIPE_B,
+               .ops = &chv_pipe_power_well_ops,
+       },
+       {
+               .name = "pipe-c",
+               .domains = CHV_PIPE_C_POWER_DOMAINS,
+               .data = PIPE_C,
+               .ops = &chv_pipe_power_well_ops,
+       },
+#endif
+       {
+               .name = "dpio-common-bc",
+               /*
+                * XXX: cmnreset for one PHY seems to disturb the other.
+                * As a workaround keep both powered on at the same
+                * time for now.
+                */
+               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+               .ops = &chv_dpio_cmn_power_well_ops,
+       },
+       {
+               .name = "dpio-common-d",
+               /*
+                * XXX: cmnreset for one PHY seems to disturb the other.
+                * As a workaround keep both powered on at the same
+                * time for now.
+                */
+               .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DPIO_CMN_D,
+               .ops = &chv_dpio_cmn_power_well_ops,
+       },
+#if 0
+       {
+               .name = "dpio-tx-b-01",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+       },
+       {
+               .name = "dpio-tx-b-23",
+               .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+       },
+       {
+               .name = "dpio-tx-c-01",
+               .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+       },
+       {
+               .name = "dpio-tx-c-23",
+               .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+                          VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+       },
+       {
+               .name = "dpio-tx-d-01",
+               .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+                          CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
+       },
+       {
+               .name = "dpio-tx-d-23",
+               .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
+                          CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
+               .ops = &vlv_dpio_power_well_ops,
+               .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
+       },
+#endif
+};
+
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+                                                enum punit_power_well power_well_id)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+               if (power_well->data == power_well_id)
+                       return power_well;
+       }
+
+       return NULL;
+}
+
+#define set_power_wells(power_domains, __power_wells) ({               \
+       (power_domains)->power_wells = (__power_wells);                 \
+       (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
+})
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+       mutex_init(&power_domains->lock);
+
+       /*
+        * The enabling order will be from lower to higher indexed wells,
+        * while the disabling order is reversed.
+        */
+       if (IS_HASWELL(dev_priv->dev)) {
+               set_power_wells(power_domains, hsw_power_wells);
+               hsw_pwr = power_domains;
+       } else if (IS_BROADWELL(dev_priv->dev)) {
+               set_power_wells(power_domains, bdw_power_wells);
+               hsw_pwr = power_domains;
+       } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+               set_power_wells(power_domains, chv_power_wells);
+       } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+               set_power_wells(power_domains, vlv_power_wells);
+       } else {
+               set_power_wells(power_domains, i9xx_always_on_power_well);
+       }
+
+       return 0;
+}
+
+static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       if (!intel_enable_rc6(dev))
+               return;
+
+       /* First, make sure we're not suspended. */
+       pm_runtime_get_sync(device);
+       pm_runtime_disable(device);
+}
+
+/**
+ * intel_power_domains_fini - finalizes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Finalizes the power domain structures for @dev_priv depending upon the
+ * supported platform. This function also disables runtime pm and ensures that
+ * the device stays powered up so that the driver can be reloaded.
+ */
+void intel_power_domains_fini(struct drm_i915_private *dev_priv)
+{
+       intel_runtime_pm_disable(dev_priv);
+
+       /* The i915.ko module is still not prepared to be loaded when
+        * the power well is not enabled, so just enable it in case
+        * we're going to unload/reload. */
+       intel_display_set_init_power(dev_priv, true);
+
+       hsw_pwr = NULL;
+}
+
+static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       mutex_lock(&power_domains->lock);
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+               power_well->ops->sync_hw(dev_priv, power_well);
+               power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+                                                                    power_well);
+       }
+       mutex_unlock(&power_domains->lock);
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_well *cmn =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+       struct i915_power_well *disp2d =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+       /* nothing to do if common lane is already off */
+       if (!cmn->ops->is_enabled(dev_priv, cmn))
+               return;
+
+       /* If the display might already be active, skip this */
+       if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+           I915_READ(DPIO_CTL) & DPIO_CMNRST)
+               return;
+
+       DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+       /* cmnlane needs DPLL registers */
+       disp2d->ops->enable(dev_priv, disp2d);
+
+       /*
+        * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+        * Need to assert and de-assert PHY SB reset by gating the
+        * common lane power, then un-gating it.
+        * Simply ungating isn't sufficient to reset the PHY enough to get
+        * ports and lanes running.
+        */
+       cmn->ops->disable(dev_priv, cmn);
+}
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @dev_priv: i915 device instance
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power domains using intel_display_set_init_power().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+       power_domains->initializing = true;
+
+       if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+               mutex_lock(&power_domains->lock);
+               vlv_cmnlane_wa(dev_priv);
+               mutex_unlock(&power_domains->lock);
+       }
+
+       /* For now, we need the power well to be always enabled. */
+       intel_display_set_init_power(dev_priv, true);
+       intel_power_domains_resume(dev_priv);
+       power_domains->initializing = false;
+}
+
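Taken together with intel_runtime_pm_enable() further below, the init/fini entry points in this file form a small lifecycle. A condensed sketch of the ordering their kerneldoc describes follows; the wrapper functions are hypothetical and everything beyond the bare calls is elided.

/* Sketch only: ordering of the entry points above as described by their
 * kerneldoc.  The real call sites live in the driver load/unload paths.
 */
static int example_driver_load(struct drm_i915_private *dev_priv)
{
	int ret;

	ret = intel_power_domains_init(dev_priv);	/* build the power well tables */
	if (ret)
		return ret;

	intel_power_domains_init_hw(dev_priv);		/* sync software state with hardware */

	/* ... modeset and GEM initialization ... */

	intel_runtime_pm_enable(dev_priv);		/* last step of driver load */
	return 0;
}

static void example_driver_unload(struct drm_i915_private *dev_priv)
{
	/* Disables runtime pm again and keeps the device powered for reload. */
	intel_power_domains_fini(dev_priv);
}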
+/**
+ * intel_aux_display_runtime_get - grab an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a power domain reference for the auxiliary power domain
+ * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
+ * parents are powered up. Therefore users should only grab a reference to the
+ * innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_aux_display_runtime_put() to release the reference again.
+ */
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+{
+       intel_runtime_pm_get(dev_priv);
+}
+
+/**
+ * intel_aux_display_runtime_put - release an auxiliary power domain reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the auxiliary power domain reference obtained by
+ * intel_aux_display_runtime_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
+{
+       intel_runtime_pm_put(dev_priv);
+}
+
+/**
+ * intel_runtime_pm_get - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on) and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       pm_runtime_get_sync(device);
+       WARN(dev_priv->pm.suspended, "Device still suspended.\n");
+}
+
+/**
+ * intel_runtime_pm_get_noresume - grab a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference (mostly used for GEM
+ * code to ensure the GTT or GT is on).
+ *
+ * It will _not_ power up the device but instead only check that it's powered
+ * on.  Therefore it is only valid to call this function from contexts where
+ * the device is known to be powered up and where trying to power it up would
+ * result in hilarity and deadlocks. That pretty much means only the system
+ * suspend/resume code where this is used to grab runtime pm references for
+ * delayed setup down in work items.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
+       pm_runtime_get_noresume(device);
+}
+
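A minimal sketch of the pattern this kerneldoc describes: the reference is taken without waking the device, from a context where it is already powered (for example the resume path), and the deferred work item releases it once its setup is done. The helper names and the work field are hypothetical.

/* Sketch only: noresume reference handed off to a deferred work item.
 * dev_priv->example_setup_work is a hypothetical field.
 */
static void example_resume_deferred_setup(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get_noresume(dev_priv);	/* device known to be awake here */
	schedule_work(&dev_priv->example_setup_work);
}

static void example_setup_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, example_setup_work);

	/* ... delayed hardware setup ... */

	intel_runtime_pm_put(dev_priv);			/* balances the noresume get */
}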
+/**
+ * intel_runtime_pm_put - release a runtime pm reference
+ * @dev_priv: i915 device instance
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       pm_runtime_mark_last_busy(device);
+       pm_runtime_put_autosuspend(device);
+}
+
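The get/put pair above (and the aux display wrappers earlier in this file, which are thin wrappers around it) is meant to bracket any access that needs the device awake. A minimal sketch of the symmetric usage the kerneldoc requires, using a hypothetical helper:

/* Sketch only: bracket register access with a symmetric get/put so the
 * device cannot runtime suspend in between.  The helper and register
 * parameter are for illustration.
 */
static u32 example_read_hw_register(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	intel_runtime_pm_get(dev_priv);		/* wakes the device if it was suspended */
	val = I915_READ(reg);
	intel_runtime_pm_put(dev_priv);		/* device may autosuspend after the delay */

	return val;
}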
+/**
+ * intel_runtime_pm_enable - enable runtime pm
+ * @dev_priv: i915 device instance
+ *
+ * This function enables runtime pm at the end of the driver load sequence.
+ *
+ * Note that this function does not currently enable runtime pm for the
+ * subordinate display power domains. That is only done on the first modeset
+ * using intel_display_set_init_power().
+ */
+void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct device *device = &dev->pdev->dev;
+
+       if (!HAS_RUNTIME_PM(dev))
+               return;
+
+       pm_runtime_set_active(device);
+
+       /*
+        * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
+        * requirement.
+        */
+       if (!intel_enable_rc6(dev)) {
+               DRM_INFO("RC6 disabled, disabling runtime PM support\n");
+               return;
+       }
+
+       pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
+       pm_runtime_mark_last_busy(device);
+       pm_runtime_use_autosuspend(device);
+
+       pm_runtime_put_autosuspend(device);
+}
+
+/* Display audio driver power well request */
+int i915_request_power_well(void)
+{
+       struct drm_i915_private *dev_priv;
+
+       if (!hsw_pwr)
+               return -ENODEV;
+
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+       intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(i915_request_power_well);
+
+/* Display audio driver power well release */
+int i915_release_power_well(void)
+{
+       struct drm_i915_private *dev_priv;
+
+       if (!hsw_pwr)
+               return -ENODEV;
+
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(i915_release_power_well);
+
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+       struct drm_i915_private *dev_priv;
+
+       if (!hsw_pwr)
+               return -ENODEV;
+
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+
+       return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
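From the audio driver's side, the contract spelled out above (request the power well before querying CDCLK) would look roughly like the following sketch; the wrapper function is hypothetical and error handling is minimal.

/* Sketch only: expected usage of the exported audio hooks above. */
static int example_audio_query_cdclk(int *cdclk_khz)
{
	int ret;

	ret = i915_request_power_well();
	if (ret)
		return ret;		/* -ENODEV if i915 is not bound */

	*cdclk_khz = i915_get_cdclk_freq();

	return i915_release_power_well();
}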
index 07a74ef..750b634 100644 (file)
@@ -138,6 +138,184 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
                I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
 }
 
+static void
+skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
+                struct drm_framebuffer *fb,
+                struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+                unsigned int crtc_w, unsigned int crtc_h,
+                uint32_t x, uint32_t y,
+                uint32_t src_w, uint32_t src_h)
+{
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane + 1;
+       u32 plane_ctl, stride;
+       int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+       plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+       /* Mask out pixel format bits in case we change them */
+       plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
+       plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
+       plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
+       plane_ctl &= ~PLANE_CTL_TILED_MASK;
+       plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
+
+       /* Trickle feed has to be enabled */
+       plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_RGB565:
+               plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
+               break;
+       case DRM_FORMAT_XBGR8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+               break;
+       /*
+        * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+        * to be already pre-multiplied. We need to add a knob (or a different
+        * DRM_FORMAT) for user-space to configure that.
+        */
+       case DRM_FORMAT_ABGR8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+                            PLANE_CTL_ORDER_RGBX |
+                            PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
+       case DRM_FORMAT_ARGB8888:
+               plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
+                            PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+               break;
+       case DRM_FORMAT_YUYV:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+               break;
+       case DRM_FORMAT_YVYU:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+               break;
+       case DRM_FORMAT_UYVY:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+               break;
+       case DRM_FORMAT_VYUY:
+               plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+               break;
+       default:
+               BUG();
+       }
+
+       switch (obj->tiling_mode) {
+       case I915_TILING_NONE:
+               stride = fb->pitches[0] >> 6;
+               break;
+       case I915_TILING_X:
+               plane_ctl |= PLANE_CTL_TILED_X;
+               stride = fb->pitches[0] >> 9;
+               break;
+       default:
+               BUG();
+       }
+
+       plane_ctl |= PLANE_CTL_ENABLE;
+       plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+       intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
+                                      pixel_size, true,
+                                      src_w != crtc_w || src_h != crtc_h);
+
+       /* Sizes are 0 based */
+       src_w--;
+       src_h--;
+       crtc_w--;
+       crtc_h--;
+
+       I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
+       I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+       I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+       I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+       I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+       I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
+       POSTING_READ(PLANE_SURF(pipe, plane));
+}
+
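The value written to PLANE_STRIDE above is not a byte count: the shifts express the framebuffer pitch in 64-byte units for linear buffers and 512-byte units for X-tiled ones. A quick worked example, with the geometry chosen purely for illustration:

/* Worked example of the stride shifts in skl_update_plane() above.
 * A linear 1920-wide XRGB8888 framebuffer typically has
 * pitches[0] == 1920 * 4 == 7680 bytes, so:
 *
 *   linear:  7680 >> 6 == 120   (pitch in 64-byte units)
 *   X-tiled: 7680 >> 9 ==  15   (pitch in 512-byte units)
 */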
+static void
+skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
+{
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane + 1;
+
+       I915_WRITE(PLANE_CTL(pipe, plane),
+                  I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
+
+       /* Activate double buffered register update */
+       I915_WRITE(PLANE_CTL(pipe, plane), 0);
+       POSTING_READ(PLANE_CTL(pipe, plane));
+
+       intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
+}
+
+static int
+skl_update_colorkey(struct drm_plane *drm_plane,
+                   struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane;
+       u32 plane_ctl;
+
+       I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
+       I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
+       I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+
+       plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+       plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
+       if (key->flags & I915_SET_COLORKEY_DESTINATION)
+               plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+       else if (key->flags & I915_SET_COLORKEY_SOURCE)
+               plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+       I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
+
+       POSTING_READ(PLANE_CTL(pipe, plane));
+
+       return 0;
+}
+
+static void
+skl_get_colorkey(struct drm_plane *drm_plane,
+                struct drm_intel_sprite_colorkey *key)
+{
+       struct drm_device *dev = drm_plane->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       const int pipe = intel_plane->pipe;
+       const int plane = intel_plane->plane;
+       u32 plane_ctl;
+
+       key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
+       key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
+       key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
+
+       plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
+
+       switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
+       case PLANE_CTL_KEY_ENABLE_DESTINATION:
+               key->flags = I915_SET_COLORKEY_DESTINATION;
+               break;
+       case PLANE_CTL_KEY_ENABLE_SOURCE:
+               key->flags = I915_SET_COLORKEY_SOURCE;
+               break;
+       default:
+               key->flags = I915_SET_COLORKEY_NONE;
+       }
+}
+
 static void
 vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
                 struct drm_framebuffer *fb,
@@ -845,57 +1023,24 @@ static bool colorkey_enabled(struct intel_plane *intel_plane)
 }
 
 static int
-intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-                  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                  unsigned int crtc_w, unsigned int crtc_h,
-                  uint32_t src_x, uint32_t src_y,
-                  uint32_t src_w, uint32_t src_h)
+intel_check_sprite_plane(struct drm_plane *plane,
+                        struct intel_plane_state *state)
 {
-       struct drm_device *dev = plane->dev;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
        struct intel_plane *intel_plane = to_intel_plane(plane);
-       enum pipe pipe = intel_crtc->pipe;
+       struct drm_framebuffer *fb = state->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
-       struct drm_i915_gem_object *old_obj = intel_plane->obj;
-       int ret;
-       bool primary_enabled;
-       bool visible;
+       int crtc_x, crtc_y;
+       unsigned int crtc_w, crtc_h;
+       uint32_t src_x, src_y, src_w, src_h;
+       struct drm_rect *src = &state->src;
+       struct drm_rect *dst = &state->dst;
+       struct drm_rect *orig_src = &state->orig_src;
+       const struct drm_rect *clip = &state->clip;
        int hscale, vscale;
        int max_scale, min_scale;
        int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       struct drm_rect src = {
-               /* sample coordinates in 16.16 fixed point */
-               .x1 = src_x,
-               .x2 = src_x + src_w,
-               .y1 = src_y,
-               .y2 = src_y + src_h,
-       };
-       struct drm_rect dst = {
-               /* integer pixels */
-               .x1 = crtc_x,
-               .x2 = crtc_x + crtc_w,
-               .y1 = crtc_y,
-               .y2 = crtc_y + crtc_h,
-       };
-       const struct drm_rect clip = {
-               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
-               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
-       };
-       const struct {
-               int crtc_x, crtc_y;
-               unsigned int crtc_w, crtc_h;
-               uint32_t src_x, src_y, src_w, src_h;
-       } orig = {
-               .crtc_x = crtc_x,
-               .crtc_y = crtc_y,
-               .crtc_w = crtc_w,
-               .crtc_h = crtc_h,
-               .src_x = src_x,
-               .src_y = src_y,
-               .src_w = src_w,
-               .src_h = src_h,
-       };
 
        /* Don't modify another pipe's plane */
        if (intel_plane->pipe != intel_crtc->pipe) {
@@ -927,55 +1072,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        max_scale = intel_plane->max_downscale << 16;
        min_scale = intel_plane->can_scale ? 1 : (1 << 16);
 
-       drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
+       drm_rect_rotate(src, fb->width << 16, fb->height << 16,
                        intel_plane->rotation);
 
-       hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
+       hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
        BUG_ON(hscale < 0);
 
-       vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
+       vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
        BUG_ON(vscale < 0);
 
-       visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
+       state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
-       crtc_x = dst.x1;
-       crtc_y = dst.y1;
-       crtc_w = drm_rect_width(&dst);
-       crtc_h = drm_rect_height(&dst);
+       crtc_x = dst->x1;
+       crtc_y = dst->y1;
+       crtc_w = drm_rect_width(dst);
+       crtc_h = drm_rect_height(dst);
 
-       if (visible) {
+       if (state->visible) {
                /* check again in case clipping clamped the results */
-               hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
+               hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
                if (hscale < 0) {
                        DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
-                       drm_rect_debug_print(&src, true);
-                       drm_rect_debug_print(&dst, false);
+                       drm_rect_debug_print(src, true);
+                       drm_rect_debug_print(dst, false);
 
                        return hscale;
                }
 
-               vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
+               vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
                if (vscale < 0) {
                        DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
-                       drm_rect_debug_print(&src, true);
-                       drm_rect_debug_print(&dst, false);
+                       drm_rect_debug_print(src, true);
+                       drm_rect_debug_print(dst, false);
 
                        return vscale;
                }
 
                /* Make the source viewport size an exact multiple of the scaling factors. */
-               drm_rect_adjust_size(&src,
-                                    drm_rect_width(&dst) * hscale - drm_rect_width(&src),
-                                    drm_rect_height(&dst) * vscale - drm_rect_height(&src));
+               drm_rect_adjust_size(src,
+                                    drm_rect_width(dst) * hscale - drm_rect_width(src),
+                                    drm_rect_height(dst) * vscale - drm_rect_height(src));
 
-               drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
+               drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
                                    intel_plane->rotation);
 
                /* sanity check to make sure the src viewport wasn't enlarged */
-               WARN_ON(src.x1 < (int) src_x ||
-                       src.y1 < (int) src_y ||
-                       src.x2 > (int) (src_x + src_w) ||
-                       src.y2 > (int) (src_y + src_h));
+               WARN_ON(src->x1 < (int) orig_src->x1 ||
+                       src->y1 < (int) orig_src->y1 ||
+                       src->x2 > (int) orig_src->x2 ||
+                       src->y2 > (int) orig_src->y2);
 
                /*
                 * Hardware doesn't handle subpixel coordinates.
@@ -983,10 +1128,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                 * increase the source viewport size, because that could
                 * push the downscaling factor out of bounds.
                 */
-               src_x = src.x1 >> 16;
-               src_w = drm_rect_width(&src) >> 16;
-               src_y = src.y1 >> 16;
-               src_h = drm_rect_height(&src) >> 16;
+               src_x = src->x1 >> 16;
+               src_w = drm_rect_width(src) >> 16;
+               src_y = src->y1 >> 16;
+               src_h = drm_rect_height(src) >> 16;
 
                if (format_is_yuv(fb->pixel_format)) {
                        src_x &= ~1;
@@ -1000,12 +1145,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                                crtc_w &= ~1;
 
                        if (crtc_w == 0)
-                               visible = false;
+                               state->visible = false;
                }
        }
 
        /* Check size restrictions when scaling */
-       if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+       if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
                unsigned int width_bytes;
 
                WARN_ON(!intel_plane->can_scale);
@@ -1013,12 +1158,13 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                /* FIXME interlacing min height is 6 */
 
                if (crtc_w < 3 || crtc_h < 3)
-                       visible = false;
+                       state->visible = false;
 
                if (src_w < 3 || src_h < 3)
-                       visible = false;
+                       state->visible = false;
 
-               width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
+               width_bytes = ((src_x * pixel_size) & 63) +
+                                       src_w * pixel_size;
 
                if (src_w > 2048 || src_h > 2048 ||
                    width_bytes > 4096 || fb->pitches[0] > 4096) {
@@ -1027,42 +1173,76 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                }
        }
 
-       dst.x1 = crtc_x;
-       dst.x2 = crtc_x + crtc_w;
-       dst.y1 = crtc_y;
-       dst.y2 = crtc_y + crtc_h;
+       if (state->visible) {
+               src->x1 = src_x;
+               src->x2 = src_x + src_w;
+               src->y1 = src_y;
+               src->y2 = src_y + src_h;
+       }
+
+       dst->x1 = crtc_x;
+       dst->x2 = crtc_x + crtc_w;
+       dst->y1 = crtc_y;
+       dst->y2 = crtc_y + crtc_h;
+
+       return 0;
+}
+
+static int
+intel_commit_sprite_plane(struct drm_plane *plane,
+                         struct intel_plane_state *state)
+{
+       struct drm_device *dev = plane->dev;
+       struct drm_crtc *crtc = state->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
+       enum pipe pipe = intel_crtc->pipe;
+       struct drm_framebuffer *fb = state->fb;
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *old_obj = intel_plane->obj;
+       int crtc_x, crtc_y;
+       unsigned int crtc_w, crtc_h;
+       uint32_t src_x, src_y, src_w, src_h;
+       struct drm_rect *dst = &state->dst;
+       const struct drm_rect *clip = &state->clip;
+       bool primary_enabled;
+       int ret;
 
        /*
         * If the sprite is completely covering the primary plane,
         * we can disable the primary and save power.
         */
-       primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
-       WARN_ON(!primary_enabled && !visible && intel_crtc->active);
+       primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
+       WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
 
-       mutex_lock(&dev->struct_mutex);
 
-       /* Note that this will apply the VT-d workaround for scanouts,
-        * which is more restrictive than required for sprites. (The
-        * primary plane requires 256KiB alignment with 64 PTE padding,
-        * the sprite planes only require 128KiB alignment and 32 PTE padding.
-        */
-       ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-
-       i915_gem_track_fb(old_obj, obj,
-                         INTEL_FRONTBUFFER_SPRITE(pipe));
-       mutex_unlock(&dev->struct_mutex);
+       if (old_obj != obj) {
+               mutex_lock(&dev->struct_mutex);
 
-       if (ret)
-               return ret;
+               /* Note that this will apply the VT-d workaround for scanouts,
+                * which is more restrictive than required for sprites. (The
+                * primary plane requires 256KiB alignment with 64 PTE padding,
+                * the sprite planes only require 128KiB alignment and 32 PTE
+                * padding.
+                */
+               ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+               if (ret == 0)
+                       i915_gem_track_fb(old_obj, obj,
+                                         INTEL_FRONTBUFFER_SPRITE(pipe));
+               mutex_unlock(&dev->struct_mutex);
+               if (ret)
+                       return ret;
+       }
 
-       intel_plane->crtc_x = orig.crtc_x;
-       intel_plane->crtc_y = orig.crtc_y;
-       intel_plane->crtc_w = orig.crtc_w;
-       intel_plane->crtc_h = orig.crtc_h;
-       intel_plane->src_x = orig.src_x;
-       intel_plane->src_y = orig.src_y;
-       intel_plane->src_w = orig.src_w;
-       intel_plane->src_h = orig.src_h;
+       intel_plane->crtc_x = state->orig_dst.x1;
+       intel_plane->crtc_y = state->orig_dst.y1;
+       intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
+       intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
+       intel_plane->src_x = state->orig_src.x1;
+       intel_plane->src_y = state->orig_src.y1;
+       intel_plane->src_w = drm_rect_width(&state->orig_src);
+       intel_plane->src_h = drm_rect_height(&state->orig_src);
        intel_plane->obj = obj;
 
        if (intel_crtc->active) {
@@ -1076,12 +1256,22 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                if (primary_was_enabled && !primary_enabled)
                        intel_pre_disable_primary(crtc);
 
-               if (visible)
+               if (state->visible) {
+                       crtc_x = state->dst.x1;
+                       crtc_y = state->dst.y1;
+                       crtc_w = drm_rect_width(&state->dst);
+                       crtc_h = drm_rect_height(&state->dst);
+                       src_x = state->src.x1;
+                       src_y = state->src.y1;
+                       src_w = drm_rect_width(&state->src);
+                       src_h = drm_rect_height(&state->src);
                        intel_plane->update_plane(plane, crtc, fb, obj,
                                                  crtc_x, crtc_y, crtc_w, crtc_h,
                                                  src_x, src_y, src_w, src_h);
-               else
+               } else {
                        intel_plane->disable_plane(plane, crtc);
+               }
+
                intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
 
@@ -1090,14 +1280,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        }
 
        /* Unpin old obj after new one is active to avoid ugliness */
-       if (old_obj) {
+       if (old_obj && old_obj != obj) {
                /*
                 * It's fairly common to simply update the position of
                 * an existing object.  In that case, we don't need to
                 * wait for vblank to avoid ugliness, we only need to
                 * do the pin & ref bookkeeping.
                 */
-               if (old_obj != obj && intel_crtc->active)
+               if (intel_crtc->active)
                        intel_wait_for_vblank(dev, intel_crtc->pipe);
 
                mutex_lock(&dev->struct_mutex);
@@ -1108,6 +1299,46 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        return 0;
 }
 
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+                  unsigned int crtc_w, unsigned int crtc_h,
+                  uint32_t src_x, uint32_t src_y,
+                  uint32_t src_w, uint32_t src_h)
+{
+       struct intel_plane_state state;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int ret;
+
+       state.crtc = crtc;
+       state.fb = fb;
+
+       /* sample coordinates in 16.16 fixed point */
+       state.src.x1 = src_x;
+       state.src.x2 = src_x + src_w;
+       state.src.y1 = src_y;
+       state.src.y2 = src_y + src_h;
+
+       /* integer pixels */
+       state.dst.x1 = crtc_x;
+       state.dst.x2 = crtc_x + crtc_w;
+       state.dst.y1 = crtc_y;
+       state.dst.y2 = crtc_y + crtc_h;
+
+       state.clip.x1 = 0;
+       state.clip.y1 = 0;
+       state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
+       state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
+       state.orig_src = state.src;
+       state.orig_dst = state.dst;
+
+       ret = intel_check_sprite_plane(plane, &state);
+       if (ret)
+               return ret;
+
+       return intel_commit_sprite_plane(plane, &state);
+}
+
 static int
 intel_disable_plane(struct drm_plane *plane)
 {
@@ -1305,6 +1536,18 @@ static uint32_t vlv_plane_formats[] = {
        DRM_FORMAT_VYUY,
 };
 
+static uint32_t skl_plane_formats[] = {
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+};
+
 int
 intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 {
@@ -1368,7 +1611,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
                        num_plane_formats = ARRAY_SIZE(snb_plane_formats);
                }
                break;
-
+       case 9:
+               /*
+                * FIXME: Skylake planes can be scaled (with some restrictions),
+                * but this is for another time.
+                */
+               intel_plane->can_scale = false;
+               intel_plane->max_downscale = 1;
+               intel_plane->update_plane = skl_update_plane;
+               intel_plane->disable_plane = skl_disable_plane;
+               intel_plane->update_colorkey = skl_update_colorkey;
+               intel_plane->get_colorkey = skl_get_colorkey;
+
+               plane_formats = skl_plane_formats;
+               num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+               break;
        default:
                kfree(intel_plane);
                return -ENODEV;
index c14341c..6f5f59b 100644 (file)
@@ -1182,18 +1182,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long irqflags;
        u32 tv_ctl, save_tv_ctl;
        u32 tv_dac, save_tv_dac;
        int type;
 
        /* Disable TV interrupts around load detect or we'll recurse */
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock_irq(&dev_priv->irq_lock);
                i915_disable_pipestat(dev_priv, 0,
                                      PIPE_HOTPLUG_INTERRUPT_STATUS |
                                      PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock_irq(&dev_priv->irq_lock);
        }
 
        save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1266,11 +1265,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 
        /* Restore interrupt config */
        if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               spin_lock_irq(&dev_priv->irq_lock);
                i915_enable_pipestat(dev_priv, 0,
                                     PIPE_HOTPLUG_INTERRUPT_STATUS |
                                     PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               spin_unlock_irq(&dev_priv->irq_lock);
        }
 
        return type;
index 918b761..0b0f4f8 100644 (file)
@@ -194,13 +194,15 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
                                                int fw_engine)
 {
+       /*
+        * WaRsDontPollForAckOnClearingFWBits:vlv
+        * Hardware clears ack bits lazily (only when all ack
+        * bits become 0) so don't poll for individual ack
+        * bits to be clear here like on other platforms.
+        */
+
        /* Check for Render Engine */
        if (FORCEWAKE_RENDER & fw_engine) {
-               if (wait_for_atomic((__raw_i915_read32(dev_priv,
-                                               FORCEWAKE_ACK_VLV) &
-                                               FORCEWAKE_KERNEL) == 0,
-                                       FORCEWAKE_ACK_TIMEOUT_MS))
-                       DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
 
                __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
@@ -214,11 +216,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
 
        /* Check for Media Engine */
        if (FORCEWAKE_MEDIA & fw_engine) {
-               if (wait_for_atomic((__raw_i915_read32(dev_priv,
-                                               FORCEWAKE_ACK_MEDIA_VLV) &
-                                               FORCEWAKE_KERNEL) == 0,
-                                       FORCEWAKE_ACK_TIMEOUT_MS))
-                       DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
 
                __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
                                   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
@@ -968,7 +965,7 @@ static const struct register_whitelist {
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
 } whitelist[] = {
-       { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
+       { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
 };
 
 int i915_reg_read_ioctl(struct drm_device *dev,
index 9305c71..8edeed0 100644 (file)
 #define DP_TEST_CRC_B_CB                   0x244
 
 #define DP_TEST_SINK_MISC                  0x246
-#define DP_TEST_CRC_SUPPORTED              (1 << 5)
+# define DP_TEST_CRC_SUPPORTED             (1 << 5)
+# define DP_TEST_COUNT_MASK                0x7
 
 #define DP_TEST_RESPONSE                   0x260
 # define DP_TEST_ACK                       (1 << 0)
 #define DP_TEST_EDID_CHECKSUM              0x261
 
 #define DP_TEST_SINK                       0x270
-#define DP_TEST_SINK_START         (1 << 0)
+# define DP_TEST_SINK_START                (1 << 0)
 
 #define DP_PAYLOAD_TABLE_UPDATE_STATUS      0x2c0   /* 1.2 MST */
 # define DP_PAYLOAD_TABLE_UPDATED           (1 << 0)
index a70d456..180ad0e 100644 (file)
        INTEL_VGA_DEVICE(0x22b2, info), \
        INTEL_VGA_DEVICE(0x22b3, info)
 
+#define INTEL_SKL_IDS(info) \
+       INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+       INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
+       INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+       INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
+       INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+       INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
+       INTEL_VGA_DEVICE(0x1912, info), /* DT  GT2 */ \
+       INTEL_VGA_DEVICE(0x1902, info), /* DT  GT1 */ \
+       INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
+       INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+       INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+       INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
+       INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
+       INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
+       INTEL_VGA_DEVICE(0x191D, info)  /* WKS GT2 */
+
 #endif /* _I915_PCIIDS_H */