Merge branch 'vmwgfx-next-3.13' of git://people.freedesktop.org/~thomash/linux into...
[cascardo/linux.git] / drivers / gpu / drm / i915 / intel_pm.c
index 63b3f5e..0a07d7c 100644 (file)
 #include <linux/module.h>
 #include <drm/i915_powerwell.h>
 
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while at this stage.  This
+ * stage is entered automatically when the GPU is idle when RC6 support is
+ * enabled, and as soon as new workload arises GPU wakes up automatically as well.
+ *
+ * There are different RC6 modes available in Intel GPU, which differentiate
+ * among each other with the latency required to enter and leave RC6 and
+ * voltage consumed by the GPU in different states.
+ *
+ * The combination of the following flags define which states GPU is allowed
+ * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is deepest RC6. Their support by hardware varies according to the
+ * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
+ * which brings the most power savings; deeper states save more power, but
+ * require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE                       (1<<0)
+#define INTEL_RC6p_ENABLE                      (1<<1)
+#define INTEL_RC6pp_ENABLE                     (1<<2)
+
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
  * during in-memory transfers and, therefore, reduce the power packet.
@@ -233,18 +254,6 @@ static void ironlake_disable_fbc(struct drm_device *dev)
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
-               if (IS_IVYBRIDGE(dev))
-                       /* WaFbcDisableDpfcClockGating:ivb */
-                       I915_WRITE(ILK_DSPCLK_GATE_D,
-                                  I915_READ(ILK_DSPCLK_GATE_D) &
-                                  ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
-
-               if (IS_HASWELL(dev))
-                       /* WaFbcDisableDpfcClockGating:hsw */
-                       I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-                                  I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
-                                  ~HSW_DPFC_GATING_DISABLE);
-
                DRM_DEBUG_KMS("disabled FBC\n");
        }
 }
@@ -274,18 +283,10 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        if (IS_IVYBRIDGE(dev)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
-               /* WaFbcDisableDpfcClockGating:ivb */
-               I915_WRITE(ILK_DSPCLK_GATE_D,
-                          I915_READ(ILK_DSPCLK_GATE_D) |
-                          ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
        } else {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw */
                I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
                           HSW_BYPASS_FBC_QUEUE);
-               /* WaFbcDisableDpfcClockGating:hsw */
-               I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-                          I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
-                          HSW_DPFC_GATING_DISABLE);
        }
 
        I915_WRITE(SNB_DPFC_CTL_SA,
@@ -2290,7 +2291,9 @@ static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
 
 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
 {
-       if (INTEL_INFO(dev)->gen >= 7)
+       if (INTEL_INFO(dev)->gen >= 8)
+               return 3072;
+       else if (INTEL_INFO(dev)->gen >= 7)
                return 768;
        else
                return 512;
@@ -2335,7 +2338,9 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
        }
 
        /* clamp to max that the registers can hold */
-       if (INTEL_INFO(dev)->gen >= 7)
+       if (INTEL_INFO(dev)->gen >= 8)
+               max = level == 0 ? 255 : 2047;
+       else if (INTEL_INFO(dev)->gen >= 7)
                /* IVB/HSW primary/sprite plane watermarks */
                max = level == 0 ? 127 : 1023;
        else if (!is_sprite)
@@ -2365,27 +2370,30 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
 }
 
 /* Calculate the maximum FBC watermark */
-static unsigned int ilk_fbc_wm_max(void)
+static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
 {
        /* max that registers can hold */
-       return 15;
+       if (INTEL_INFO(dev)->gen >= 8)
+               return 31;
+       else
+               return 15;
 }
 
-static void ilk_wm_max(struct drm_device *dev,
-                      int level,
-                      const struct intel_wm_config *config,
-                      enum intel_ddb_partitioning ddb_partitioning,
-                      struct hsw_wm_maximums *max)
+static void ilk_compute_wm_maximums(struct drm_device *dev,
+                                   int level,
+                                   const struct intel_wm_config *config,
+                                   enum intel_ddb_partitioning ddb_partitioning,
+                                   struct hsw_wm_maximums *max)
 {
        max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
        max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
        max->cur = ilk_cursor_wm_max(dev, level, config);
-       max->fbc = ilk_fbc_wm_max();
+       max->fbc = ilk_fbc_wm_max(dev);
 }
 
-static bool ilk_check_wm(int level,
-                        const struct hsw_wm_maximums *max,
-                        struct intel_wm_level *result)
+static bool ilk_validate_wm_level(int level,
+                                 const struct hsw_wm_maximums *max,
+                                 struct intel_wm_level *result)
 {
        bool ret;
 
@@ -2626,7 +2634,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
        struct hsw_wm_maximums max;
 
        /* LP0 watermarks always use 1/2 DDB partitioning */
-       ilk_wm_max(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+       ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
        for (level = 0; level <= max_level; level++)
                ilk_compute_wm_level(dev_priv, level, params,
@@ -2635,7 +2643,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
        pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
 
        /* At least LP0 must be valid */
-       return ilk_check_wm(0, &max, &pipe_wm->wm[0]);
+       return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
 }
 
 /*
@@ -2680,7 +2688,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 
                ilk_merge_wm_level(dev, level, wm);
 
-               if (!ilk_check_wm(level, max, wm))
+               if (!ilk_validate_wm_level(level, max, wm))
                        break;
 
                /*
@@ -2721,10 +2729,18 @@ static void hsw_compute_wm_results(struct drm_device *dev,
                if (!r->enable)
                        break;
 
-               results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
-                                                         r->fbc_val,
-                                                         r->pri_val,
-                                                         r->cur_val);
+               results->wm_lp[wm_lp - 1] = WM3_LP_EN |
+                       ((level * 2) << WM1_LP_LATENCY_SHIFT) |
+                       (r->pri_val << WM1_LP_SR_SHIFT) |
+                       r->cur_val;
+
+               if (INTEL_INFO(dev)->gen >= 8)
+                       results->wm_lp[wm_lp - 1] |=
+                               r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
+               else
+                       results->wm_lp[wm_lp - 1] |=
+                               r->fbc_val << WM1_LP_FBC_SHIFT;
+
                results->wm_lp_spr[wm_lp - 1] = r->spr_val;
        }
 
@@ -2927,12 +2943,13 @@ static void haswell_update_wm(struct drm_crtc *crtc)
 
        intel_crtc->wm.active = pipe_wm;
 
-       ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+       ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
        ilk_wm_merge(dev, &max, &lp_wm_1_2);
 
        /* 5/6 split only in single pipe config on IVB+ */
-       if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active == 1) {
-               ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+       if (INTEL_INFO(dev)->gen >= 7 &&
+           config.num_pipes_active == 1 && config.sprites_enabled) {
+               ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
                ilk_wm_merge(dev, &max, &lp_wm_5_6);
 
                best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
@@ -3684,6 +3701,20 @@ static void valleyview_disable_rps(struct drm_device *dev)
        }
 }
 
+/*
+ * Log which RC6 states the given RC_CONTROL mode enables, plus any
+ * platform-specific restriction (SNB: no deep RC6; HSW: plain RC6 only).
+ * Pure debug/info output; reads no hardware state.
+ */
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+       if (IS_GEN6(dev))
+               DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+
+       if (IS_HASWELL(dev))
+               DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+
+       DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+                       (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+                       (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+                       (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+}
+
 int intel_enable_rc6(const struct drm_device *dev)
 {
        /* No RC6 before Ironlake */
@@ -3698,18 +3729,13 @@ int intel_enable_rc6(const struct drm_device *dev)
        if (INTEL_INFO(dev)->gen == 5)
                return 0;
 
-       if (IS_HASWELL(dev)) {
-               DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+       if (IS_HASWELL(dev))
                return INTEL_RC6_ENABLE;
-       }
 
        /* snb/ivb have more than one rc6 state. */
-       if (INTEL_INFO(dev)->gen == 6) {
-               DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+       if (INTEL_INFO(dev)->gen == 6)
                return INTEL_RC6_ENABLE;
-       }
 
-       DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
        return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
@@ -3736,6 +3762,78 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
        I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
 }
 
+/*
+ * Broadwell (gen8) RC6/RPS enabling, following the numbered steps of the
+ * documented bring-up sequence: reset SW RC state, grab forcewake, disable
+ * RC states, program RC6 thresholds, enable RC6 (if requested via the
+ * intel_enable_rc6() mask), program RPS defaults/thresholds, enable RPS,
+ * then set an initial frequency and unmask the RPS interrupts.
+ */
+static void gen8_enable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       uint32_t rc6_mask = 0, rp_state_cap;
+       int unused;
+
+       /* 1a: Software RC state - RC0 */
+       I915_WRITE(GEN6_RC_STATE, 0);
+
+       /* 1c & 1d: Get forcewake during program sequence. Although the driver
+        * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+       gen6_gt_force_wake_get(dev_priv);
+
+       /* 2a: Disable RC states. */
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+
+       rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+
+       /* 2b: Program RC6 thresholds.*/
+       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+       I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+       I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+       for_each_ring(ring, dev_priv, unused)
+               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+       I915_WRITE(GEN6_RC_SLEEP, 0);
+       I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+       /* 3: Enable RC6 */
+       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+               rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+       DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+       I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                       GEN6_RC_CTL_EI_MODE(1) |
+                       rc6_mask);
+
+       /* 4 Program defaults and thresholds for RPS*/
+       I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
+       I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
+       /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
+
+       /* Docs recommend 900MHz, and 300 MHz respectively */
+       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+                  dev_priv->rps.max_delay << 24 |
+                  dev_priv->rps.min_delay << 16);
+
+       I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
+       I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+       I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
+
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+       /* 5: Enable RPS */
+       I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_TURBO |
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                  GEN6_RP_MEDIA_IS_GFX |
+                  GEN6_RP_ENABLE |
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_AVG);
+
+       /* 6: Ring frequency + overclocking (our driver does this later) */
+
+       gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
+
+       gen6_enable_rps_interrupts(dev);
+
+       gen6_gt_force_wake_put(dev_priv);
+}
+
 static void gen6_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3811,10 +3909,7 @@ static void gen6_enable_rps(struct drm_device *dev)
                        rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
        }
 
-       DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-                       (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-                       (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-                       (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+       intel_print_rc6_info(dev, rc6_mask);
 
        I915_WRITE(GEN6_RC_CONTROL,
                   rc6_mask |
@@ -3887,7 +3982,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
        /* Convert from kHz to MHz */
        max_ia_freq /= 1000;
 
-       min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf;
+       min_ring_freq = I915_READ(DCLK) & 0xf;
        /* convert DDR frequency from units of 266.6MHz to bandwidth */
        min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
@@ -3901,7 +3996,10 @@ void gen6_update_ring_freq(struct drm_device *dev)
                int diff = dev_priv->rps.max_delay - gpu_freq;
                unsigned int ia_freq = 0, ring_freq = 0;
 
-               if (IS_HASWELL(dev)) {
+               if (INTEL_INFO(dev)->gen >= 8) {
+                       /* max(2 * GT, DDR). NB: GT is 50MHz units */
+                       ring_freq = max(min_ring_freq, gpu_freq);
+               } else if (IS_HASWELL(dev)) {
                        ring_freq = mult_frac(gpu_freq, 5, 4);
                        ring_freq = max(min_ring_freq, ring_freq);
                        /* leave ia_freq as the default, chosen by cpufreq */
@@ -4050,6 +4148,9 @@ static void valleyview_enable_rps(struct drm_device *dev)
                                      VLV_RENDER_RC6_COUNT_EN));
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
                rc6_mode = GEN7_RC_CTL_TO_MODE;
+
+       intel_print_rc6_info(dev, rc6_mode);
+
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
        val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -4221,6 +4322,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 
        I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
        I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+       intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4860,6 +4963,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 
        if (IS_VALLEYVIEW(dev)) {
                valleyview_enable_rps(dev);
+       } else if (IS_BROADWELL(dev)) {
+               gen8_enable_rps(dev);
+               gen6_update_ring_freq(dev);
        } else {
                gen6_enable_rps(dev);
                gen6_update_ring_freq(dev);
@@ -4995,7 +5101,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+                  PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+                  PCH_CPUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
        /* The below fixes the weird display corruption, a few pixels shifted
@@ -5166,6 +5274,50 @@ static void lpt_suspend_hw(struct drm_device *dev)
        }
 }
 
+/*
+ * Broadwell (gen8) clock-gating / workaround programming: clears the LP
+ * watermark registers, then applies the chicken-bit workarounds (several
+ * flagged as possibly pre-production-only; see the FIXME below).
+ */
+static void gen8_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe i;
+
+       I915_WRITE(WM3_LP_ILK, 0);
+       I915_WRITE(WM2_LP_ILK, 0);
+       I915_WRITE(WM1_LP_ILK, 0);
+
+       /* FIXME(BDW): Check all the w/a, some might only apply to
+        * pre-production hw. */
+
+       WARN(!i915_preliminary_hw_support,
+            "GEN8_CENTROID_PIXEL_OPT_DIS should not be needed for production\n");
+       I915_WRITE(HALF_SLICE_CHICKEN3,
+                  _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
+       I915_WRITE(HALF_SLICE_CHICKEN3,
+                  _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+       I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
+
+       I915_WRITE(_3D_CHICKEN3,
+                  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+
+       I915_WRITE(COMMON_SLICE_CHICKEN2,
+                  _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
+
+       I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+                  _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
+
+       /* WaSwitchSolVfFArbitrationPriority */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
+       /* WaPsrDPAMaskVBlankInSRD */
+       I915_WRITE(CHICKEN_PAR1_1,
+                  I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
+
+       /* WaPsrDPRSUnmaskVBlankInSRD */
+       for_each_pipe(i) {
+               /* OR the workaround bit into the read-back value. The '|'
+                * must be outside I915_READ(): placing it inside the read
+                * argument corrupts the register offset being read. */
+               I915_WRITE(CHICKEN_PIPESL_1(i),
+                          I915_READ(CHICKEN_PIPESL_1(i)) |
+                          DPRS_MASK_VBLANK_SRD);
+       }
+}
+
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5189,6 +5341,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
                        GEN7_WA_L3_CHICKEN_MODE);
 
+       /* L3 caching of data atomics doesn't work -- disable it. */
+       I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+       I915_WRITE(HSW_ROW_CHICKEN3,
+                  _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
+
        /* This is required by WaCatErrorRejectionIssue:hsw */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                        I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -5484,6 +5641,25 @@ void intel_suspend_hw(struct drm_device *dev)
                lpt_suspend_hw(dev);
 }
 
+/*
+ * Return true if @domain belongs to the platform's always-on power well
+ * (BDW_/HSW_ALWAYS_ON_POWER_DOMAINS); such domains never need an explicit
+ * power-well reference. Warns and defaults to true on unknown platforms.
+ */
+static bool is_always_on_power_domain(struct drm_device *dev,
+                                     enum intel_display_power_domain domain)
+{
+       unsigned long always_on_domains;
+
+       BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
+
+       if (IS_BROADWELL(dev)) {
+               always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
+       } else if (IS_HASWELL(dev)) {
+               always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
+       } else {
+               WARN_ON(1);
+               return true;
+       }
+
+       return BIT(domain) & always_on_domains;
+}
+
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -5497,24 +5673,11 @@ bool intel_display_power_enabled(struct drm_device *dev,
        if (!HAS_POWER_WELL(dev))
                return true;
 
-       switch (domain) {
-       case POWER_DOMAIN_PIPE_A:
-       case POWER_DOMAIN_TRANSCODER_EDP:
+       if (is_always_on_power_domain(dev, domain))
                return true;
-       case POWER_DOMAIN_VGA:
-       case POWER_DOMAIN_PIPE_B:
-       case POWER_DOMAIN_PIPE_C:
-       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-       case POWER_DOMAIN_TRANSCODER_A:
-       case POWER_DOMAIN_TRANSCODER_B:
-       case POWER_DOMAIN_TRANSCODER_C:
-               return I915_READ(HSW_PWR_WELL_DRIVER) ==
+
+       return I915_READ(HSW_PWR_WELL_DRIVER) ==
                     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-       default:
-               BUG();
-       }
 }
 
 static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5564,169 +5727,130 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
        }
 }
 
-static void __intel_power_well_get(struct i915_power_well *power_well)
+static void __intel_power_well_get(struct drm_device *dev,
+                                  struct i915_power_well *power_well)
 {
        if (!power_well->count++)
-               __intel_set_power_well(power_well->device, true);
+               __intel_set_power_well(dev, true);
 }
 
-static void __intel_power_well_put(struct i915_power_well *power_well)
+static void __intel_power_well_put(struct drm_device *dev,
+                                  struct i915_power_well *power_well)
 {
        WARN_ON(!power_well->count);
-       if (!--power_well->count)
-               __intel_set_power_well(power_well->device, false);
+       if (!--power_well->count && i915_disable_power_well)
+               __intel_set_power_well(dev, false);
 }
 
 void intel_display_power_get(struct drm_device *dev,
                             enum intel_display_power_domain domain)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_power_well *power_well = &dev_priv->power_well;
+       struct i915_power_domains *power_domains;
 
        if (!HAS_POWER_WELL(dev))
                return;
 
-       switch (domain) {
-       case POWER_DOMAIN_PIPE_A:
-       case POWER_DOMAIN_TRANSCODER_EDP:
-               return;
-       case POWER_DOMAIN_VGA:
-       case POWER_DOMAIN_PIPE_B:
-       case POWER_DOMAIN_PIPE_C:
-       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-       case POWER_DOMAIN_TRANSCODER_A:
-       case POWER_DOMAIN_TRANSCODER_B:
-       case POWER_DOMAIN_TRANSCODER_C:
-               spin_lock_irq(&power_well->lock);
-               __intel_power_well_get(power_well);
-               spin_unlock_irq(&power_well->lock);
+       if (is_always_on_power_domain(dev, domain))
                return;
-       default:
-               BUG();
-       }
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+       __intel_power_well_get(dev, &power_domains->power_wells[0]);
+       mutex_unlock(&power_domains->lock);
 }
 
 void intel_display_power_put(struct drm_device *dev,
                             enum intel_display_power_domain domain)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_power_well *power_well = &dev_priv->power_well;
+       struct i915_power_domains *power_domains;
 
        if (!HAS_POWER_WELL(dev))
                return;
 
-       switch (domain) {
-       case POWER_DOMAIN_PIPE_A:
-       case POWER_DOMAIN_TRANSCODER_EDP:
-               return;
-       case POWER_DOMAIN_VGA:
-       case POWER_DOMAIN_PIPE_B:
-       case POWER_DOMAIN_PIPE_C:
-       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-       case POWER_DOMAIN_TRANSCODER_A:
-       case POWER_DOMAIN_TRANSCODER_B:
-       case POWER_DOMAIN_TRANSCODER_C:
-               spin_lock_irq(&power_well->lock);
-               __intel_power_well_put(power_well);
-               spin_unlock_irq(&power_well->lock);
+       if (is_always_on_power_domain(dev, domain))
                return;
-       default:
-               BUG();
-       }
+
+       power_domains = &dev_priv->power_domains;
+
+       mutex_lock(&power_domains->lock);
+       __intel_power_well_put(dev, &power_domains->power_wells[0]);
+       mutex_unlock(&power_domains->lock);
 }
 
-static struct i915_power_well *hsw_pwr;
+static struct i915_power_domains *hsw_pwr;
 
 /* Display audio driver power well request */
 void i915_request_power_well(void)
 {
+       struct drm_i915_private *dev_priv;
+
        if (WARN_ON(!hsw_pwr))
                return;
 
-       spin_lock_irq(&hsw_pwr->lock);
-       __intel_power_well_get(hsw_pwr);
-       spin_unlock_irq(&hsw_pwr->lock);
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+
+       mutex_lock(&hsw_pwr->lock);
+       __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
+       mutex_unlock(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
 /* Display audio driver power well release */
 void i915_release_power_well(void)
 {
+       struct drm_i915_private *dev_priv;
+
        if (WARN_ON(!hsw_pwr))
                return;
 
-       spin_lock_irq(&hsw_pwr->lock);
-       __intel_power_well_put(hsw_pwr);
-       spin_unlock_irq(&hsw_pwr->lock);
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+
+       mutex_lock(&hsw_pwr->lock);
+       __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
+       mutex_unlock(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
-int i915_init_power_well(struct drm_device *dev)
+int intel_power_domains_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
 
-       hsw_pwr = &dev_priv->power_well;
+       mutex_init(&power_domains->lock);
+       hsw_pwr = power_domains;
 
-       hsw_pwr->device = dev;
-       spin_lock_init(&hsw_pwr->lock);
-       hsw_pwr->count = 0;
+       power_well = &power_domains->power_wells[0];
+       power_well->count = 0;
 
        return 0;
 }
 
-void i915_remove_power_well(struct drm_device *dev)
+void intel_power_domains_remove(struct drm_device *dev)
 {
        hsw_pwr = NULL;
 }
 
-void intel_set_power_well(struct drm_device *dev, bool enable)
+static void intel_power_domains_resume(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_power_well *power_well = &dev_priv->power_well;
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
 
        if (!HAS_POWER_WELL(dev))
                return;
 
-       if (!i915_disable_power_well && !enable)
-               return;
-
-       spin_lock_irq(&power_well->lock);
+       mutex_lock(&power_domains->lock);
 
-       /*
-        * This function will only ever contribute one
-        * to the power well reference count. i915_request
-        * is what tracks whether we have or have not
-        * added the one to the reference count.
-        */
-       if (power_well->i915_request == enable)
-               goto out;
-
-       power_well->i915_request = enable;
-
-       if (enable)
-               __intel_power_well_get(power_well);
-       else
-               __intel_power_well_put(power_well);
-
- out:
-       spin_unlock_irq(&power_well->lock);
-}
-
-static void intel_resume_power_well(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_power_well *power_well = &dev_priv->power_well;
-
-       if (!HAS_POWER_WELL(dev))
-               return;
-
-       spin_lock_irq(&power_well->lock);
+       power_well = &power_domains->power_wells[0];
        __intel_set_power_well(dev, power_well->count > 0);
-       spin_unlock_irq(&power_well->lock);
+
+       mutex_unlock(&power_domains->lock);
 }
 
 /*
@@ -5735,7 +5859,7 @@ static void intel_resume_power_well(struct drm_device *dev)
  * to be enabled, and it will only be disabled if none of the registers is
  * requesting it to be enabled.
  */
-void intel_init_power_well(struct drm_device *dev)
+void intel_power_domains_init_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -5743,8 +5867,8 @@ void intel_init_power_well(struct drm_device *dev)
                return;
 
        /* For now, we need the power well to be always enabled. */
-       intel_set_power_well(dev, true);
-       intel_resume_power_well(dev);
+       intel_display_set_init_power(dev, true);
+       intel_power_domains_resume(dev);
 
        /* We're taking over the BIOS, so clear any requests made by it since
         * the driver is in charge now. */
@@ -5848,6 +5972,8 @@ void intel_init_pm(struct drm_device *dev)
                                dev_priv->display.update_wm = NULL;
                        }
                        dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+               } else if (INTEL_INFO(dev)->gen == 8) {
+                       dev_priv->display.init_clock_gating = gen8_init_clock_gating;
                } else
                        dev_priv->display.update_wm = NULL;
        } else if (IS_VALLEYVIEW(dev)) {
@@ -6010,4 +6136,3 @@ void intel_pm_init(struct drm_device *dev)
        INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                          intel_gen6_powersave_work);
 }
-