drm/i915: add POWER_DOMAIN_PLLS
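
The intel_pm.c hunks below only wire POWER_DOMAIN_PLLS into the HSW/BDW
always-on domain mask; as a rough, hedged sketch (the actual callers live in
intel_display.c and are not part of this file's diff, so the function below is
illustrative and its name is assumed), shared DPLL code would hold the new
domain for as long as a PLL is in use:

	/* illustrative sketch only -- not taken from this diff */
	static void example_enable_shared_dpll(struct intel_crtc *crtc)
	{
		struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

		/* keep the PLL hardware powered while the shared DPLL is enabled */
		intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

		/* ... program and enable the shared DPLL here ... */
	}

	/* with a matching intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS)
	 * on the corresponding disable path. */
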
[cascardo/linux.git] drivers/gpu/drm/i915/intel_pm.c
index 54242e4..55228df 100644
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+               dev_priv->fbc.threshold++;
+
+       switch (dev_priv->fbc.threshold) {
+       case 4:
+       case 3:
+               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+               break;
+       case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-       else
+               break;
+       case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+               break;
+       }
        dpfc_ctl |= DPFC_CTL_FENCE_EN;
        if (IS_GEN5(dev))
                dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
 
        dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+               dev_priv->fbc.threshold++;
+
+       switch (dev_priv->fbc.threshold) {
+       case 4:
+       case 3:
+               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+               break;
+       case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-       else
+               break;
+       case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+               break;
+       }
+
        dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
-       struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        const struct drm_display_mode *adjusted_mode;
        unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
 
        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->primary->fb;
-       intel_fb = to_intel_framebuffer(fb);
-       obj = intel_fb->obj;
+       obj = intel_fb_obj(fb);
        adjusted_mode = &intel_crtc->config.adjusted_mode;
 
        if (i915.enable_fbc < 0) {
@@ -529,7 +546,10 @@ void intel_update_fbc(struct drm_device *dev)
                goto out_disable;
        }
 
-       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+               max_width = 4096;
+               max_height = 4096;
+       } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                max_width = 4096;
                max_height = 2048;
        } else {
@@ -563,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
        if (in_dbg_master())
                goto out_disable;
 
-       if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+       if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+                                             drm_format_plane_cpp(fb->pixel_format, 0))) {
                if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
                        DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
                goto out_disable;
@@ -789,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
        return NULL;
 }
 
-static void pineview_disable_cxsr(struct drm_device *dev)
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
+       u32 val;
 
-       /* deactivate cxsr */
-       I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+       if (IS_VALLEYVIEW(dev)) {
+               I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+       } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+               I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+       } else if (IS_PINEVIEW(dev)) {
+               val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+               val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+               I915_WRITE(DSPFW3, val);
+       } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+               val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+                              _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+               I915_WRITE(FW_BLC_SELF, val);
+       } else if (IS_I915GM(dev)) {
+               val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+                              _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+               I915_WRITE(INSTPM, val);
+       } else {
+               return;
+       }
+
+       DRM_DEBUG_KMS("memory self-refresh is %s\n",
+                     enable ? "enabled" : "disabled");
 }
 
 /*
@@ -864,95 +906,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
 
 /* Pineview has different values for various configs */
 static const struct intel_watermark_params pineview_display_wm = {
-       PINEVIEW_DISPLAY_FIFO,
-       PINEVIEW_MAX_WM,
-       PINEVIEW_DFT_WM,
-       PINEVIEW_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE
+       .fifo_size = PINEVIEW_DISPLAY_FIFO,
+       .max_wm = PINEVIEW_MAX_WM,
+       .default_wm = PINEVIEW_DFT_WM,
+       .guard_size = PINEVIEW_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_display_hplloff_wm = {
-       PINEVIEW_DISPLAY_FIFO,
-       PINEVIEW_MAX_WM,
-       PINEVIEW_DFT_HPLLOFF_WM,
-       PINEVIEW_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE
+       .fifo_size = PINEVIEW_DISPLAY_FIFO,
+       .max_wm = PINEVIEW_MAX_WM,
+       .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+       .guard_size = PINEVIEW_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_cursor_wm = {
-       PINEVIEW_CURSOR_FIFO,
-       PINEVIEW_CURSOR_MAX_WM,
-       PINEVIEW_CURSOR_DFT_WM,
-       PINEVIEW_CURSOR_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE,
+       .fifo_size = PINEVIEW_CURSOR_FIFO,
+       .max_wm = PINEVIEW_CURSOR_MAX_WM,
+       .default_wm = PINEVIEW_CURSOR_DFT_WM,
+       .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
-       PINEVIEW_CURSOR_FIFO,
-       PINEVIEW_CURSOR_MAX_WM,
-       PINEVIEW_CURSOR_DFT_WM,
-       PINEVIEW_CURSOR_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE
+       .fifo_size = PINEVIEW_CURSOR_FIFO,
+       .max_wm = PINEVIEW_CURSOR_MAX_WM,
+       .default_wm = PINEVIEW_CURSOR_DFT_WM,
+       .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params g4x_wm_info = {
-       G4X_FIFO_SIZE,
-       G4X_MAX_WM,
-       G4X_MAX_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = G4X_FIFO_SIZE,
+       .max_wm = G4X_MAX_WM,
+       .default_wm = G4X_MAX_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params g4x_cursor_wm_info = {
-       I965_CURSOR_FIFO,
-       I965_CURSOR_MAX_WM,
-       I965_CURSOR_DFT_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = I965_CURSOR_FIFO,
+       .max_wm = I965_CURSOR_MAX_WM,
+       .default_wm = I965_CURSOR_DFT_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params valleyview_wm_info = {
-       VALLEYVIEW_FIFO_SIZE,
-       VALLEYVIEW_MAX_WM,
-       VALLEYVIEW_MAX_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = VALLEYVIEW_FIFO_SIZE,
+       .max_wm = VALLEYVIEW_MAX_WM,
+       .default_wm = VALLEYVIEW_MAX_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params valleyview_cursor_wm_info = {
-       I965_CURSOR_FIFO,
-       VALLEYVIEW_CURSOR_MAX_WM,
-       I965_CURSOR_DFT_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = I965_CURSOR_FIFO,
+       .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+       .default_wm = I965_CURSOR_DFT_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i965_cursor_wm_info = {
-       I965_CURSOR_FIFO,
-       I965_CURSOR_MAX_WM,
-       I965_CURSOR_DFT_WM,
-       2,
-       I915_FIFO_LINE_SIZE,
+       .fifo_size = I965_CURSOR_FIFO,
+       .max_wm = I965_CURSOR_MAX_WM,
+       .default_wm = I965_CURSOR_DFT_WM,
+       .guard_size = 2,
+       .cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i945_wm_info = {
-       I945_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I915_FIFO_LINE_SIZE
+       .fifo_size = I945_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i915_wm_info = {
-       I915_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I915_FIFO_LINE_SIZE
+       .fifo_size = I915_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I915_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i830_wm_info = {
-       I855GM_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I830_FIFO_LINE_SIZE
+       .fifo_size = I855GM_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I830_FIFO_LINE_SIZE,
 };
 static const struct intel_watermark_params i845_wm_info = {
-       I830_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I830_FIFO_LINE_SIZE
+       .fifo_size = I830_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I830_FIFO_LINE_SIZE,
 };
 
 /**
@@ -1033,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
                                         dev_priv->fsb_freq, dev_priv->mem_freq);
        if (!latency) {
                DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-               pineview_disable_cxsr(dev);
+               intel_set_memory_cxsr(dev_priv, false);
                return;
        }
 
@@ -1084,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
                I915_WRITE(DSPFW3, reg);
                DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
-               /* activate cxsr */
-               I915_WRITE(DSPFW3,
-                          I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
-               DRM_DEBUG_KMS("Self-refresh is enabled\n");
+               intel_set_memory_cxsr(dev_priv, true);
        } else {
-               pineview_disable_cxsr(dev);
-               DRM_DEBUG_KMS("Self-refresh is disabled\n");
+               intel_set_memory_cxsr(dev_priv, false);
        }
 }
 
@@ -1316,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
        int plane_sr, cursor_sr;
        int ignore_plane_sr, ignore_cursor_sr;
        unsigned int enabled = 0;
+       bool cxsr_enabled;
 
        vlv_update_drain_latency(dev);
 
@@ -1342,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
                             &ignore_plane_sr, &cursor_sr)) {
-               I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+               cxsr_enabled = true;
        } else {
-               I915_WRITE(FW_BLC_SELF_VLV,
-                          I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+               cxsr_enabled = false;
+               intel_set_memory_cxsr(dev_priv, false);
                plane_sr = cursor_sr = 0;
        }
 
@@ -1365,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+       if (cxsr_enabled)
+               intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1375,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
+       bool cxsr_enabled;
 
        if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, latency_ns,
@@ -1394,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
                             &plane_sr, &cursor_sr)) {
-               I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+               cxsr_enabled = true;
        } else {
-               I915_WRITE(FW_BLC_SELF,
-                          I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+               cxsr_enabled = false;
+               intel_set_memory_cxsr(dev_priv, false);
                plane_sr = cursor_sr = 0;
        }
 
@@ -1418,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+       if (cxsr_enabled)
+               intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1427,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
        struct drm_crtc *crtc;
        int srwm = 1;
        int cursor_sr = 16;
+       bool cxsr_enabled;
 
        /* Calc sr entries for one plane configs */
        crtc = single_enabled_crtc(dev);
@@ -1468,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
                DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
                              "cursor %d\n", srwm, cursor_sr);
 
-               if (IS_CRESTLINE(dev))
-                       I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+               cxsr_enabled = true;
        } else {
+               cxsr_enabled = false;
                /* Turn off self refresh if both pipes are enabled */
-               if (IS_CRESTLINE(dev))
-                       I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-                                  & ~FW_BLC_SELF_EN);
+               intel_set_memory_cxsr(dev_priv, false);
        }
 
        DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1486,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
        I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
        /* update cursor SR watermark */
        I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+       if (cxsr_enabled)
+               intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1545,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
        DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
        if (IS_I915GM(dev) && enabled) {
-               struct intel_framebuffer *fb;
+               struct drm_i915_gem_object *obj;
 
-               fb = to_intel_framebuffer(enabled->primary->fb);
+               obj = intel_fb_obj(enabled->primary->fb);
 
                /* self-refresh seems busted with untiled */
-               if (fb->obj->tiling_mode == I915_TILING_NONE)
+               if (obj->tiling_mode == I915_TILING_NONE)
                        enabled = NULL;
        }
 
@@ -1560,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
        cwm = 2;
 
        /* Play safe and disable self-refresh before adjusting watermarks. */
-       if (IS_I945G(dev) || IS_I945GM(dev))
-               I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
-       else if (IS_I915GM(dev))
-               I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+       intel_set_memory_cxsr(dev_priv, false);
 
        /* Calc sr entries for one plane configs */
        if (HAS_FW_BLC(dev) && enabled) {
@@ -1609,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
        I915_WRITE(FW_BLC, fwater_lo);
        I915_WRITE(FW_BLC2, fwater_hi);
 
-       if (HAS_FW_BLC(dev)) {
-               if (enabled) {
-                       if (IS_I945G(dev) || IS_I945GM(dev))
-                               I915_WRITE(FW_BLC_SELF,
-                                          FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-                       else if (IS_I915GM(dev))
-                               I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
-                       DRM_DEBUG_KMS("memory self refresh enabled\n");
-               } else
-                       DRM_DEBUG_KMS("memory self refresh disabled\n");
-       }
+       if (enabled)
+               intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -3209,6 +3245,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 */
 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+
+       /* Latest VLV doesn't need to force the gfx clock */
+       if (dev->pdev->revision >= 0xd) {
+               valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+               return;
+       }
+
        /*
         * When we are idle.  Drop to min voltage state.
         */
@@ -3232,8 +3276,11 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 
        vlv_force_gfx_clock(dev_priv, false);
 
-       I915_WRITE(GEN6_PMINTRMSK,
-                  gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+       if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
+               I915_WRITE(GEN6_PMINTRMSK, ~dev_priv->pm_rps_events);
+       else
+               I915_WRITE(GEN6_PMINTRMSK,
+                          gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3340,6 +3387,13 @@ static void gen6_disable_rps(struct drm_device *dev)
                gen6_disable_rps_interrupts(dev);
 }
 
+static void cherryview_disable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+}
+
 static void valleyview_disable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3475,15 +3529,23 @@ static void gen8_enable_rps(struct drm_device *dev)
        for_each_ring(ring, dev_priv, unused)
                I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
        I915_WRITE(GEN6_RC_SLEEP, 0);
-       I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+       if (IS_BROADWELL(dev))
+               I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+       else
+               I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
        /* 3: Enable RC6 */
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
        intel_print_rc6_info(dev, rc6_mask);
-       I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-                                   GEN6_RC_CTL_EI_MODE(1) |
-                                   rc6_mask);
+       if (IS_BROADWELL(dev))
+               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                               GEN7_RC_CTL_TO_MODE |
+                               rc6_mask);
+       else
+               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                               GEN6_RC_CTL_EI_MODE(1) |
+                               rc6_mask);
 
        /* 4 Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RPNSWREQ,
@@ -3719,6 +3781,35 @@ void gen6_update_ring_freq(struct drm_device *dev)
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
+int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rp0;
+
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+       rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+       return rp0;
+}
+
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rpe;
+
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+       rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+
+       return rpe;
+}
+
+int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+       u32 val, rpn;
+
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+       rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+       return rpn;
+}
+
 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
        u32 val, rp0;
@@ -3758,6 +3849,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
                             dev_priv->vlv_pctx->stolen->start);
 }
 
+
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+       unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+       WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
+
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long pctx_paddr, paddr;
+       struct i915_gtt *gtt = &dev_priv->gtt;
+       u32 pcbr;
+       int pctx_size = 32*1024;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       pcbr = I915_READ(VLV_PCBR);
+       if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+               paddr = (dev_priv->mm.stolen_base +
+                        (gtt->stolen_size - pctx_size));
+
+               pctx_paddr = (paddr & (~4095));
+               I915_WRITE(VLV_PCBR, pctx_paddr);
+       }
+}
+
 static void valleyview_setup_pctx(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3847,11 +3967,135 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
+static void cherryview_init_gt_powersave(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       cherryview_setup_pctx(dev);
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+       DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+                        dev_priv->rps.max_freq);
+
+       dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+       DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
+
+       dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+       DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+                        dev_priv->rps.min_freq);
+
+       /* Preserve min/max settings in case of re-init */
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+       if (dev_priv->rps.min_freq_softlimit == 0)
+               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
 {
        valleyview_cleanup_pctx(dev);
 }
 
+static void cherryview_enable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
+       u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+       int i;
+
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+       gtfifodbg = I915_READ(GTFIFODBG);
+       if (gtfifodbg) {
+               DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+                                gtfifodbg);
+               I915_WRITE(GTFIFODBG, gtfifodbg);
+       }
+
+       cherryview_check_pctx(dev_priv);
+
+       /* 1a & 1b: Get forcewake during program sequence. Although the driver
+        * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+       /* 2a: Program RC6 thresholds.*/
+       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+       I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+       I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+       for_each_ring(ring, dev_priv, i)
+               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+       I915_WRITE(GEN6_RC_SLEEP, 0);
+
+       I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+       /* allows RC6 residency counter to work */
+       I915_WRITE(VLV_COUNTER_CONTROL,
+                  _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+                                     VLV_MEDIA_RC6_COUNT_EN |
+                                     VLV_RENDER_RC6_COUNT_EN));
+
+       /* For now we assume BIOS is allocating and populating the PCBR  */
+       pcbr = I915_READ(VLV_PCBR);
+
+       DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+
+       /* 3: Enable RC6 */
+       if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+                                               (pcbr >> VLV_PCBR_ADDR_SHIFT))
+               rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+       I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+       /* 4 Program defaults and thresholds for RPS*/
+       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+       I915_WRITE(GEN6_RP_UP_EI, 66000);
+       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+       /* WaDisablePwrmtrEvent:chv (pre-production hw) */
+       I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+       I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
+       /* 5: Enable RPS */
+       I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                  GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+                  GEN6_RP_ENABLE |
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_AVG);
+
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+       DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+       DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                        dev_priv->rps.cur_freq);
+
+       DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
+
+       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
 static void valleyview_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3878,6 +4122,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
        I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
 
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
@@ -3898,9 +4143,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
        /* allows RC6 residency counter to work */
        I915_WRITE(VLV_COUNTER_CONTROL,
-                  _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+                  _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+                                     VLV_RENDER_RC0_COUNT_EN |
                                      VLV_MEDIA_RC6_COUNT_EN |
                                      VLV_RENDER_RC6_COUNT_EN));
+
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
                rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
@@ -4660,33 +4907,57 @@ void intel_init_gt_powersave(struct drm_device *dev)
 {
        i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
 
-       if (IS_VALLEYVIEW(dev))
+       if (IS_CHERRYVIEW(dev))
+               cherryview_init_gt_powersave(dev);
+       else if (IS_VALLEYVIEW(dev))
                valleyview_init_gt_powersave(dev);
 }
 
 void intel_cleanup_gt_powersave(struct drm_device *dev)
 {
-       if (IS_VALLEYVIEW(dev))
+       if (IS_CHERRYVIEW(dev))
+               return;
+       else if (IS_VALLEYVIEW(dev))
                valleyview_cleanup_gt_powersave(dev);
 }
 
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here, we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Interrupts should be disabled already to avoid re-arming. */
+       WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled);
+
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+       cancel_work_sync(&dev_priv->rps.work);
+}
+
 void intel_disable_gt_powersave(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* Interrupts should be disabled already to avoid re-arming. */
-       WARN_ON(dev->irq_enabled);
+       WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled);
 
        if (IS_IRONLAKE_M(dev)) {
                ironlake_disable_drps(dev);
                ironlake_disable_rc6(dev);
-       } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
-               if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
-                       intel_runtime_pm_put(dev_priv);
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               intel_suspend_gt_powersave(dev);
 
-               cancel_work_sync(&dev_priv->rps.work);
                mutex_lock(&dev_priv->rps.hw_lock);
-               if (IS_VALLEYVIEW(dev))
+               if (IS_CHERRYVIEW(dev))
+                       cherryview_disable_rps(dev);
+               else if (IS_VALLEYVIEW(dev))
                        valleyview_disable_rps(dev);
                else
                        gen6_disable_rps(dev);
@@ -4704,7 +4975,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 
        mutex_lock(&dev_priv->rps.hw_lock);
 
-       if (IS_VALLEYVIEW(dev)) {
+       if (IS_CHERRYVIEW(dev)) {
+               cherryview_enable_rps(dev);
+       } else if (IS_VALLEYVIEW(dev)) {
                valleyview_enable_rps(dev);
        } else if (IS_BROADWELL(dev)) {
                gen8_enable_rps(dev);
@@ -4729,7 +5002,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
                ironlake_enable_rc6(dev);
                intel_init_emon(dev);
                mutex_unlock(&dev->struct_mutex);
-       } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+       } else if (INTEL_INFO(dev)->gen >= 6) {
                /*
                 * PCU communication is slow and this doesn't need to be
                 * done at any specific time, so do this out of our fast path
@@ -5102,7 +5375,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
 
        I915_WRITE(_3D_CHICKEN3,
-                  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
 
        I915_WRITE(COMMON_SLICE_CHICKEN2,
                   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5337,10 +5610,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
        }
        DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
-       dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
-       DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
-                        dev_priv->vlv_cdclk_freq);
-
        I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
        /* WaDisableEarlyCull:vlv */
@@ -5603,8 +5872,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 }
 
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
-                                   enum intel_display_power_domain domain)
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+                                         enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
@@ -5615,16 +5884,19 @@ bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
                return false;
 
        power_domains = &dev_priv->power_domains;
+
        is_enabled = true;
+
        for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
                if (power_well->always_on)
                        continue;
 
-               if (!power_well->count) {
+               if (!power_well->hw_enabled) {
                        is_enabled = false;
                        break;
                }
        }
+
        return is_enabled;
 }
 
@@ -5632,30 +5904,15 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       bool is_enabled;
-       int i;
-
-       if (dev_priv->pm.suspended)
-               return false;
+       bool ret;
 
        power_domains = &dev_priv->power_domains;
 
-       is_enabled = true;
-
        mutex_lock(&power_domains->lock);
-       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-               if (power_well->always_on)
-                       continue;
-
-               if (!power_well->ops->is_enabled(dev_priv, power_well)) {
-                       is_enabled = false;
-                       break;
-               }
-       }
+       ret = intel_display_power_enabled_unlocked(dev_priv, domain);
        mutex_unlock(&power_domains->lock);
 
-       return is_enabled;
+       return ret;
 }
 
 /*
@@ -5768,34 +6025,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
        return true;
 }
 
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
-                         enum punit_power_well power_well_id, bool enable)
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+                              struct i915_power_well *power_well, bool enable)
 {
-       struct drm_device *dev = dev_priv->dev;
+       enum punit_power_well power_well_id = power_well->data;
        u32 mask;
        u32 state;
        u32 ctrl;
-       enum pipe pipe;
-
-       if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-               if (enable) {
-                       /*
-                        * Enable the CRI clock source so we can get at the
-                        * display and the reference clock for VGA
-                        * hotplug / manual detection.
-                        */
-                       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                                  DPLL_REFA_CLK_ENABLE_VLV |
-                                  DPLL_INTEGRATED_CRI_CLK_VLV);
-                       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-               } else {
-                       for_each_pipe(pipe)
-                               assert_pll_disabled(dev_priv, pipe);
-                       /* Assert common reset */
-                       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
-                                  ~DPIO_CMNRST);
-               }
-       }
 
        mask = PUNIT_PWRGT_MASK(power_well_id);
        state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5823,28 +6059,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
 
 out:
        mutex_unlock(&dev_priv->rps.hw_lock);
-
-       /*
-        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
-        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
-        *   b. The other bits such as sfr settings / modesel may all
-        *      be set to 0.
-        *
-        * This should only be done on init and resume from S3 with
-        * both PLLs disabled, or we risk losing DPIO and PLL
-        * synchronization.
-        */
-       if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
-               I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-                              struct i915_power_well *power_well, bool enable)
-{
-       enum punit_power_well power_well_id = power_well->data;
-
-       __vlv_set_power_well(dev_priv, power_well_id, enable);
 }
 
 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -5936,6 +6150,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
        vlv_set_power_well(dev_priv, power_well, false);
 }
 
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+       /*
+        * Enable the CRI clock source so we can get at the
+        * display and the reference clock for VGA
+        * hotplug / manual detection.
+        */
+       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                  DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+       vlv_set_power_well(dev_priv, power_well, true);
+
+       /*
+        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
+        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
+        *   b. The other bits such as sfr settings / modesel may all
+        *      be set to 0.
+        *
+        * This should only be done on init and resume from S3 with
+        * both PLLs disabled, or we risk losing DPIO and PLL
+        * synchronization.
+        */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       struct drm_device *dev = dev_priv->dev;
+       enum pipe pipe;
+
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+       for_each_pipe(pipe)
+               assert_pll_disabled(dev_priv, pipe);
+
+       /* Assert common reset */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+       vlv_set_power_well(dev_priv, power_well, false);
+}
+
 static void check_power_well_state(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
 {
@@ -5976,6 +6237,7 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
                if (!power_well->count++) {
                        DRM_DEBUG_KMS("enabling %s\n", power_well->name);
                        power_well->ops->enable(dev_priv, power_well);
+                       power_well->hw_enabled = true;
                }
 
                check_power_well_state(dev_priv, power_well);
@@ -6005,6 +6267,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 
                if (!--power_well->count && i915.disable_power_well) {
                        DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+                       power_well->hw_enabled = false;
                        power_well->ops->disable(dev_priv, power_well);
                }
 
@@ -6048,6 +6311,27 @@ int i915_release_power_well(void)
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+       struct drm_i915_private *dev_priv;
+
+       if (!hsw_pwr)
+               return -ENODEV;
+
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+
+       return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
+
+
 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
 #define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
@@ -6062,6 +6346,7 @@ EXPORT_SYMBOL_GPL(i915_release_power_well);
        BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
        BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
        BIT(POWER_DOMAIN_PORT_CRT) |                    \
+       BIT(POWER_DOMAIN_PLLS) |                        \
        BIT(POWER_DOMAIN_INIT))
 #define HSW_DISPLAY_POWER_DOMAINS (                            \
        (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
@@ -6161,6 +6446,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = {
        .is_enabled = vlv_power_well_enabled,
 };
 
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_dpio_cmn_power_well_enable,
+       .disable = vlv_dpio_cmn_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+};
+
 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
        .sync_hw = vlv_power_well_sync_hw,
        .enable = vlv_power_well_enable,
@@ -6221,10 +6513,25 @@ static struct i915_power_well vlv_power_wells[] = {
                .name = "dpio-common",
                .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
                .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-               .ops = &vlv_dpio_power_well_ops,
+               .ops = &vlv_dpio_cmn_power_well_ops,
        },
 };
 
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+                                                enum punit_power_well power_well_id)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+               if (power_well->data == power_well_id)
+                       return power_well;
+       }
+
+       return NULL;
+}
+
 #define set_power_wells(power_domains, __power_wells) ({               \
        (power_domains)->power_wells = (__power_wells);                 \
        (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
@@ -6267,16 +6574,58 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
        int i;
 
        mutex_lock(&power_domains->lock);
-       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
                power_well->ops->sync_hw(dev_priv, power_well);
+               power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+                                                                    power_well);
+       }
        mutex_unlock(&power_domains->lock);
 }
 
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_well *cmn =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+       struct i915_power_well *disp2d =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+       /* nothing to do if common lane is already off */
+       if (!cmn->ops->is_enabled(dev_priv, cmn))
+               return;
+
+       /* If the display might be already active skip this */
+       if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+           I915_READ(DPIO_CTL) & DPIO_CMNRST)
+               return;
+
+       DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+       /* cmnlane needs DPLL registers */
+       disp2d->ops->enable(dev_priv, disp2d);
+
+       /*
+        * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+        * Need to assert and de-assert PHY SB reset by gating the
+        * common lane power, then un-gating it.
+        * Simply ungating isn't enough to reset the PHY enough to get
+        * ports and lanes running.
+        */
+       cmn->ops->disable(dev_priv, cmn);
+}
+
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
        power_domains->initializing = true;
+
+       if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+               mutex_lock(&power_domains->lock);
+               vlv_cmnlane_wa(dev_priv);
+               mutex_unlock(&power_domains->lock);
+       }
+
        /* For now, we need the power well to be always enabled. */
        intel_display_set_init_power(dev_priv, true);
        intel_power_domains_resume(dev_priv);
@@ -6449,7 +6798,7 @@ void intel_init_pm(struct drm_device *dev)
                                 (dev_priv->is_ddr3 == 1) ? "3" : "2",
                                 dev_priv->fsb_freq, dev_priv->mem_freq);
                        /* Disable CxSR and never update its watermark again */
-                       pineview_disable_cxsr(dev);
+                       intel_set_memory_cxsr(dev_priv, false);
                        dev_priv->display.update_wm = NULL;
                } else
                        dev_priv->display.update_wm = pineview_update_wm;