drivers/gpu/drm/i915/intel_psr.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23
24 /**
25  * DOC: Panel Self Refresh (PSR/SRD)
26  *
27  * Since Haswell the display controller supports Panel Self-Refresh on
28  * display panels which have a remote frame buffer (RFB) implemented
29  * according to the PSR spec in eDP 1.3. The PSR feature allows the display
30  * to go to lower standby states when the system is idle but the display is
31  * on, as it completely eliminates display refresh requests to DDR memory as
32  * long as the frame buffer for that display is unchanged.
33  *
34  * Panel Self Refresh must be supported by both Hardware (source) and
35  * Panel (sink).
36  *
37  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
38  * to power down the link and memory controller. For DSI panels the same idea
39  * is called "manual mode".
40  *
41  * The implementation uses the hardware-based PSR support which automatically
42  * enters/exits self-refresh mode. The hardware takes care of sending the
43  * required DP aux message and could even retrain the link (that part isn't
44  * enabled yet though). The hardware also keeps track of any frontbuffer
45  * changes to know when to exit self-refresh mode again. Unfortunately that
46  * part doesn't work too well, which is why the i915 PSR support uses the
47  * software frontbuffer tracking to make sure it doesn't miss a screen
48  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
49  * get called by the frontbuffer tracking code. Note that because of locking
50  * issues the self-refresh re-enable code is done from a work queue, which
51  * must be correctly synchronized/cancelled when shutting down the pipe.
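 *
 * As a rough illustration (a simplified sketch, not the literal call chain
 * in the frontbuffer tracking code), every frontbuffer write ends up
 * bracketed like this:
 *
 *	intel_psr_invalidate(dev, frontbuffer_bits);
 *	... CPU/GTT rendering or a flip touching the frontbuffer ...
 *	intel_psr_flush(dev, frontbuffer_bits, origin);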
52  */
53
54 #include <drm/drmP.h>
55
56 #include "intel_drv.h"
57 #include "i915_drv.h"
58
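/* Check whether the sink advertises PSR support in the PSR capability
 * bytes cached from its DPCD (intel_dp->psr_dpcd).
 */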
59 static bool is_edp_psr(struct intel_dp *intel_dp)
60 {
61         return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
62 }
63
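/* Report whether the VLV/CHV PSR hardware on @pipe is in one of its active
 * states: active with no RFB update pending, or active with a single frame
 * update in flight.
 */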
64 static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
65 {
66         struct drm_i915_private *dev_priv = dev->dev_private;
67         uint32_t val;
68
69         val = I915_READ(VLV_PSRSTAT(pipe)) &
70               VLV_EDP_PSR_CURR_STATE_MASK;
71         return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
72                (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
73 }
74
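/* Disable the VSC DIP, write the given VSC SDP payload into the HSW+ video
 * DIP data registers of the transcoder, zero-pad the rest of the DIP buffer
 * and then re-enable the VSC DIP.
 */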
75 static void intel_psr_write_vsc(struct intel_dp *intel_dp,
76                                 const struct edp_vsc_psr *vsc_psr)
77 {
78         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
79         struct drm_device *dev = dig_port->base.base.dev;
80         struct drm_i915_private *dev_priv = dev->dev_private;
81         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
82         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
83         i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
84         uint32_t *data = (uint32_t *) vsc_psr;
85         unsigned int i;
86
87         /* As per BSpec (Pipe Video Data Island Packet), we need to disable
88          * the video DIP being updated before programming the video DIP data
89          * buffer registers for the DIP being updated. */
90         I915_WRITE(ctl_reg, 0);
91         POSTING_READ(ctl_reg);
92
93         for (i = 0; i < sizeof(*vsc_psr); i += 4) {
94                 I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
95                                                    i >> 2), *data);
96                 data++;
97         }
98         for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
99                 I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
100                                                    i >> 2), 0);
101
102         I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
103         POSTING_READ(ctl_reg);
104 }
105
106 static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
107 {
108         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109         struct drm_device *dev = intel_dig_port->base.base.dev;
110         struct drm_i915_private *dev_priv = dev->dev_private;
111         struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
112         enum pipe pipe = to_intel_crtc(crtc)->pipe;
113         uint32_t val;
114
115         /* VLV auto-generates the VSC packet as per eDP 1.3 spec, Table 3.10 */
116         val  = I915_READ(VLV_VSCSDP(pipe));
117         val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
118         val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
119         I915_WRITE(VLV_VSCSDP(pipe), val);
120 }
121
122 static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
123 {
124         struct edp_vsc_psr psr_vsc;
125
126         /* Prepare the VSC header for SU as per eDP 1.4 spec, Table 6.11 */
127         memset(&psr_vsc, 0, sizeof(psr_vsc));
128         psr_vsc.sdp_header.HB0 = 0;
129         psr_vsc.sdp_header.HB1 = 0x7;
130         psr_vsc.sdp_header.HB2 = 0x3;
131         psr_vsc.sdp_header.HB3 = 0xb;
132         intel_psr_write_vsc(intel_dp, &psr_vsc);
133 }
134
135 static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
136 {
137         struct edp_vsc_psr psr_vsc;
138
139         /* Prepare the VSC packet as per eDP 1.3 spec, Table 3.10 */
140         memset(&psr_vsc, 0, sizeof(psr_vsc));
141         psr_vsc.sdp_header.HB0 = 0;
142         psr_vsc.sdp_header.HB1 = 0x7;
143         psr_vsc.sdp_header.HB2 = 0x2;
144         psr_vsc.sdp_header.HB3 = 0x8;
145         intel_psr_write_vsc(intel_dp, &psr_vsc);
146 }
147
148 static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
149 {
150         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
151                            DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
152 }
153
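/* Gen9+ PSR reuses the ordinary per-port AUX channel registers for its PSR
 * AUX transaction, while earlier hardware has dedicated EDP_PSR_AUX_*
 * registers.
 */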
154 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
155                                        enum port port)
156 {
157         if (INTEL_INFO(dev_priv)->gen >= 9)
158                 return DP_AUX_CH_CTL(port);
159         else
160                 return EDP_PSR_AUX_CTL;
161 }
162
163 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
164                                         enum port port, int index)
165 {
166         if (INTEL_INFO(dev_priv)->gen >= 9)
167                 return DP_AUX_CH_DATA(port, index);
168         else
169                 return EDP_PSR_AUX_DATA(index);
170 }
171
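/* Stage a pre-packed DP_SET_POWER_D0 AUX write in the PSR AUX registers
 * (presumably issued by the hardware when it needs to wake the sink),
 * enable AUX frame sync in the sink if supported, and finally enable PSR
 * in the sink's DPCD configuration register.
 */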
172 static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
173 {
174         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
175         struct drm_device *dev = dig_port->base.base.dev;
176         struct drm_i915_private *dev_priv = dev->dev_private;
177         uint32_t aux_clock_divider;
178         i915_reg_t aux_ctl_reg;
179         int precharge = 0x3;
180         static const uint8_t aux_msg[] = {
181                 [0] = DP_AUX_NATIVE_WRITE << 4,
182                 [1] = DP_SET_POWER >> 8,
183                 [2] = DP_SET_POWER & 0xff,
184                 [3] = 1 - 1,
185                 [4] = DP_SET_POWER_D0,
186         };
187         enum port port = dig_port->port;
188         int i;
189
190         BUILD_BUG_ON(sizeof(aux_msg) > 20);
191
192         aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
193
194         /* Enable AUX frame sync at sink */
195         if (dev_priv->psr.aux_frame_sync)
196                 drm_dp_dpcd_writeb(&intel_dp->aux,
197                                 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
198                                 DP_AUX_FRAME_SYNC_ENABLE);
199
200         aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
201
202         /* Setup AUX registers */
203         for (i = 0; i < sizeof(aux_msg); i += 4)
204                 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
205                            intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
206
207         if (INTEL_INFO(dev)->gen >= 9) {
208                 uint32_t val;
209
210                 val = I915_READ(aux_ctl_reg);
211                 val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
212                 val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
213                 val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
214                 val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
215                 /* Use hardcoded data values for PSR, frame sync and GTC */
216                 val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
217                 val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
218                 val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
219                 I915_WRITE(aux_ctl_reg, val);
220         } else {
221                 I915_WRITE(aux_ctl_reg,
222                    DP_AUX_CH_CTL_TIME_OUT_400us |
223                    (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
224                    (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
225                    (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
226         }
227
228         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
229 }
230
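/* Enable PSR on the source side for VLV/CHV but leave it in the inactive
 * state; the transition to active is requested later by vlv_psr_activate().
 */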
231 static void vlv_psr_enable_source(struct intel_dp *intel_dp)
232 {
233         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
234         struct drm_device *dev = dig_port->base.base.dev;
235         struct drm_i915_private *dev_priv = dev->dev_private;
236         struct drm_crtc *crtc = dig_port->base.base.crtc;
237         enum pipe pipe = to_intel_crtc(crtc)->pipe;
238
239         /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
240         I915_WRITE(VLV_PSRCTL(pipe),
241                    VLV_EDP_PSR_MODE_SW_TIMER |
242                    VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
243                    VLV_EDP_PSR_ENABLE);
244 }
245
246 static void vlv_psr_activate(struct intel_dp *intel_dp)
247 {
248         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
249         struct drm_device *dev = dig_port->base.base.dev;
250         struct drm_i915_private *dev_priv = dev->dev_private;
251         struct drm_crtc *crtc = dig_port->base.base.crtc;
252         enum pipe pipe = to_intel_crtc(crtc)->pipe;
253
254         /* Let's do the transition from PSR_state 1 to PSR_state 2,
255          * that is, PSR transition to active - static frame transmission.
256          * Then the hardware is responsible for the transition to PSR_state 3,
257          * that is, PSR active - no Remote Frame Buffer (RFB) update.
258          */
259         I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
260                    VLV_EDP_PSR_ACTIVE_ENTRY);
261 }
262
263 static void hsw_psr_enable_source(struct intel_dp *intel_dp)
264 {
265         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
266         struct drm_device *dev = dig_port->base.base.dev;
267         struct drm_i915_private *dev_priv = dev->dev_private;
268
269         uint32_t max_sleep_time = 0x1f;
270         /*
271          * Respect the VBT in case it asks for a higher idle_frame value.
272          * Use 6 as the minimum to cover all known cases, including the
273          * off-by-one issue that HW has in some cases. There are also
274          * cases where the sink should be able to train with only 5 or 6
275          * idle patterns.
276          */
277         uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
278         uint32_t val = 0x0;
279
280         if (IS_HASWELL(dev))
281                 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
282
283         I915_WRITE(EDP_PSR_CTL, val |
284                    max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
285                    idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
286                    EDP_PSR_ENABLE);
287
288         if (dev_priv->psr.psr2_support)
289                 I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
290                                 EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
291 }
292
293 static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
294 {
295         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
296         struct drm_device *dev = dig_port->base.base.dev;
297         struct drm_i915_private *dev_priv = dev->dev_private;
298         struct drm_crtc *crtc = dig_port->base.base.crtc;
299         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
300
301         lockdep_assert_held(&dev_priv->psr.lock);
302         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
303         WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
304
305         dev_priv->psr.source_ok = false;
306
307         if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
308                 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
309                 return false;
310         }
311
312         if (!i915.enable_psr) {
313                 DRM_DEBUG_KMS("PSR disabled by flag\n");
314                 return false;
315         }
316
317         if (IS_HASWELL(dev) &&
318             I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
319                       S3D_ENABLE) {
320                 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
321                 return false;
322         }
323
324         if (IS_HASWELL(dev) &&
325             intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
326                 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
327                 return false;
328         }
329
330         if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
331             ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
332                 DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
333                 return false;
334         }
335
336         dev_priv->psr.source_ok = true;
337         return true;
338 }
339
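/* Make PSR actually start working: on HSW+ this programs and enables the
 * source PSR control registers, on VLV/CHV it requests the transition from
 * inactive to active. Caller must hold psr.lock and PSR must currently be
 * inactive.
 */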
340 static void intel_psr_activate(struct intel_dp *intel_dp)
341 {
342         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
343         struct drm_device *dev = intel_dig_port->base.base.dev;
344         struct drm_i915_private *dev_priv = dev->dev_private;
345
346         WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
347         WARN_ON(dev_priv->psr.active);
348         lockdep_assert_held(&dev_priv->psr.lock);
349
350         /* Enable/Re-enable PSR on the host */
351         if (HAS_DDI(dev))
352                 /* On HSW+, after we enable PSR on the source it activates
353                  * as soon as it matches the configured idle_frame count, so
354                  * we actually just enable it here at activation time.
355                  */
356                 hsw_psr_enable_source(intel_dp);
357         else
358                 vlv_psr_activate(intel_dp);
359
360         dev_priv->psr.active = true;
361 }
362
363 /**
364  * intel_psr_enable - Enable PSR
365  * @intel_dp: Intel DP
366  *
367  * This function can only be called after the pipe is fully trained and enabled.
368  */
369 void intel_psr_enable(struct intel_dp *intel_dp)
370 {
371         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
372         struct drm_device *dev = intel_dig_port->base.base.dev;
373         struct drm_i915_private *dev_priv = dev->dev_private;
374         struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
375
376         if (!HAS_PSR(dev)) {
377                 DRM_DEBUG_KMS("PSR not supported on this platform\n");
378                 return;
379         }
380
381         if (!is_edp_psr(intel_dp)) {
382                 DRM_DEBUG_KMS("PSR not supported by this panel\n");
383                 return;
384         }
385
386         mutex_lock(&dev_priv->psr.lock);
387         if (dev_priv->psr.enabled) {
388                 DRM_DEBUG_KMS("PSR already in use\n");
389                 goto unlock;
390         }
391
392         if (!intel_psr_match_conditions(intel_dp))
393                 goto unlock;
394
395         dev_priv->psr.busy_frontbuffer_bits = 0;
396
397         if (HAS_DDI(dev)) {
398                 hsw_psr_setup_vsc(intel_dp);
399
400                 if (dev_priv->psr.psr2_support) {
401                         /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
402                         if (crtc->config->pipe_src_w > 3200 ||
403                                 crtc->config->pipe_src_h > 2000)
404                                 dev_priv->psr.psr2_support = false;
405                         else
406                                 skl_psr_setup_su_vsc(intel_dp);
407                 }
408
409                 /*
410                  * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
411                  * Also mask LPSP to avoid a dependency on other drivers that
412                  * might block runtime_pm, besides preventing other HW tracking
413                  * issues now that we can rely on frontbuffer tracking.
414                  */
415                 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
416                            EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
417
418                 /* Enable PSR on the panel */
419                 hsw_psr_enable_sink(intel_dp);
420
421                 if (INTEL_INFO(dev)->gen >= 9)
422                         intel_psr_activate(intel_dp);
423         } else {
424                 vlv_psr_setup_vsc(intel_dp);
425
426                 /* Enable PSR on the panel */
427                 vlv_psr_enable_sink(intel_dp);
428
429                 /* On HSW+ enable_source also means going to the PSR
430                  * entry/active state as soon as idle_frame is achieved,
431                  * which would be too soon here. On VLV however enable_source
432                  * just enables PSR but leaves it in the inactive state, so
433                  * we can do this prior to the active transition, i.e. here.
434                  */
435                 vlv_psr_enable_source(intel_dp);
436         }
437
438         /*
439          * FIXME: Activation should happen immediately since this function
440          * is only called after the pipe is fully trained and enabled.
441          * However on every platform we face issues when the first activation
442          * follows a modeset so quickly.
443          *     - On VLV/CHV we get a blank screen on first activation
444          *     - On HSW/BDW we get a recoverable frozen screen until the next
445          *       exit-activate sequence.
446          */
447         if (INTEL_INFO(dev)->gen < 9)
448                 schedule_delayed_work(&dev_priv->psr.work,
449                                       msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
450
451         dev_priv->psr.enabled = intel_dp;
452 unlock:
453         mutex_unlock(&dev_priv->psr.lock);
454 }
455
456 static void vlv_psr_disable(struct intel_dp *intel_dp)
457 {
458         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
459         struct drm_device *dev = intel_dig_port->base.base.dev;
460         struct drm_i915_private *dev_priv = dev->dev_private;
461         struct intel_crtc *intel_crtc =
462                 to_intel_crtc(intel_dig_port->base.base.crtc);
463         uint32_t val;
464
465         if (dev_priv->psr.active) {
466                 /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
467                 if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
468                               VLV_EDP_PSR_IN_TRANS) == 0, 1))
469                         WARN(1, "PSR transition took longer than expected\n");
470
471                 val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
472                 val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
473                 val &= ~VLV_EDP_PSR_ENABLE;
474                 val &= ~VLV_EDP_PSR_MODE_MASK;
475                 I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
476
477                 dev_priv->psr.active = false;
478         } else {
479                 WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
480         }
481 }
482
483 static void hsw_psr_disable(struct intel_dp *intel_dp)
484 {
485         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
486         struct drm_device *dev = intel_dig_port->base.base.dev;
487         struct drm_i915_private *dev_priv = dev->dev_private;
488
489         if (dev_priv->psr.active) {
490                 I915_WRITE(EDP_PSR_CTL,
491                            I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
492
493                 /* Wait till PSR is idle */
494                 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
495                                EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
496                         DRM_ERROR("Timed out waiting for PSR Idle State\n");
497
498                 dev_priv->psr.active = false;
499         } else {
500                 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
501         }
502 }
503
504 /**
505  * intel_psr_disable - Disable PSR
506  * @intel_dp: Intel DP
507  *
508  * This function needs to be called before disabling pipe.
509  */
510 void intel_psr_disable(struct intel_dp *intel_dp)
511 {
512         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
513         struct drm_device *dev = intel_dig_port->base.base.dev;
514         struct drm_i915_private *dev_priv = dev->dev_private;
515
516         mutex_lock(&dev_priv->psr.lock);
517         if (!dev_priv->psr.enabled) {
518                 mutex_unlock(&dev_priv->psr.lock);
519                 return;
520         }
521
522         /* Disable PSR on Source */
523         if (HAS_DDI(dev))
524                 hsw_psr_disable(intel_dp);
525         else
526                 vlv_psr_disable(intel_dp);
527
528         /* Disable PSR on Sink */
529         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
530
531         dev_priv->psr.enabled = NULL;
532         mutex_unlock(&dev_priv->psr.lock);
533
534         cancel_delayed_work_sync(&dev_priv->psr.work);
535 }
536
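/* Delayed work that re-activates PSR after it has been disabled by an
 * invalidate/flush: wait for the hardware to report idle, then re-check
 * under psr.lock that PSR is still enabled and that no frontbuffer bits
 * are busy before activating again.
 */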
537 static void intel_psr_work(struct work_struct *work)
538 {
539         struct drm_i915_private *dev_priv =
540                 container_of(work, typeof(*dev_priv), psr.work.work);
541         struct intel_dp *intel_dp = dev_priv->psr.enabled;
542         struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
543         enum pipe pipe = to_intel_crtc(crtc)->pipe;
544
545         /* We have to make sure PSR is ready for re-enable,
546          * otherwise it stays disabled until the next full enable/disable
547          * cycle. PSR might take some time to get fully disabled and be
548          * ready for re-enable.
549          */
550         if (HAS_DDI(dev_priv->dev)) {
551                 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
552                               EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
553                         DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
554                         return;
555                 }
556         } else {
557                 if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
558                               VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
559                         DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
560                         return;
561                 }
562         }
563         mutex_lock(&dev_priv->psr.lock);
564         intel_dp = dev_priv->psr.enabled;
565
566         if (!intel_dp)
567                 goto unlock;
568
569         /*
570          * The delayed work can race with an invalidate hence we need to
571          * recheck. Since psr_flush first clears this and then reschedules we
572          * won't ever miss a flush when bailing out here.
573          */
574         if (dev_priv->psr.busy_frontbuffer_bits)
575                 goto unlock;
576
577         intel_psr_activate(intel_dp);
578 unlock:
579         mutex_unlock(&dev_priv->psr.lock);
580 }
581
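/* Take PSR out of its active state so the screen can be updated: on HSW+
 * clear the enable bit in EDP_PSR_CTL, on VLV/CHV drop the active-entry
 * request and wake the sink with a DPCD power write.
 */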
582 static void intel_psr_exit(struct drm_device *dev)
583 {
584         struct drm_i915_private *dev_priv = dev->dev_private;
585         struct intel_dp *intel_dp = dev_priv->psr.enabled;
586         struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
587         enum pipe pipe = to_intel_crtc(crtc)->pipe;
588         u32 val;
589
590         if (!dev_priv->psr.active)
591                 return;
592
593         if (HAS_DDI(dev)) {
594                 val = I915_READ(EDP_PSR_CTL);
595
596                 WARN_ON(!(val & EDP_PSR_ENABLE));
597
598                 I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
599         } else {
600                 val = I915_READ(VLV_PSRCTL(pipe));
601
602                 /* Here we do the transition from PSR_state 3 directly to
603                  * PSR_state 5, since PSR_state 4 (active with single frame
604                  * update) can be skipped. From PSR_state 5 (PSR exit) the
605                  * hardware is responsible for transitioning back to
606                  * PSR_state 1 (PSR inactive), the same state as after
607                  * vlv_psr_enable_source.
608                  */
609                 val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
610                 I915_WRITE(VLV_PSRCTL(pipe), val);
611
612                 /* Send AUX wake up - Spec says after transitioning to PSR
613                  * active we have to send AUX wake up by writing 01h in DPCD
614                  * 600h of sink device.
615                  * XXX: This might slow down the transition, but without this
616                  * HW doesn't complete the transition to PSR_state 1 and we
617                  * never get the screen updated.
618                  */
619                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
620                                    DP_SET_POWER_D0);
621         }
622
623         dev_priv->psr.active = false;
624 }
625
626 /**
627  * intel_psr_single_frame_update - Single Frame Update
628  * @dev: DRM device
629  * @frontbuffer_bits: frontbuffer plane tracking bits
630  *
631  * Some platforms support a single frame update feature that is used to
632  * send and update only one frame to the Remote Frame Buffer.
633  * So far it is only implemented for Valleyview and Cherryview because
634  * hardware requires this to be done before a page flip.
635  */
636 void intel_psr_single_frame_update(struct drm_device *dev,
637                                    unsigned frontbuffer_bits)
638 {
639         struct drm_i915_private *dev_priv = dev->dev_private;
640         struct drm_crtc *crtc;
641         enum pipe pipe;
642         u32 val;
643
644         /*
645          * Single frame update is already supported on BDW+ but it requires
646          * many W/A and it isn't really needed.
647          */
648         if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
649                 return;
650
651         mutex_lock(&dev_priv->psr.lock);
652         if (!dev_priv->psr.enabled) {
653                 mutex_unlock(&dev_priv->psr.lock);
654                 return;
655         }
656
657         crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
658         pipe = to_intel_crtc(crtc)->pipe;
659
660         if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
661                 val = I915_READ(VLV_PSRCTL(pipe));
662
663                 /*
664                  * We need to set this bit before writing registers for a flip.
665                  * This bit will self-clear when it gets to the PSR active state.
666                  */
667                 I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
668         }
669         mutex_unlock(&dev_priv->psr.lock);
670 }
671
672 /**
673  * intel_psr_invalidate - Invalidate PSR
674  * @dev: DRM device
675  * @frontbuffer_bits: frontbuffer plane tracking bits
676  *
677  * Since the hardware frontbuffer tracking has gaps we need to integrate
678  * with the software frontbuffer tracking. This function gets called every
679  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
680  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
681  *
682  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
683  */
684 void intel_psr_invalidate(struct drm_device *dev,
685                           unsigned frontbuffer_bits)
686 {
687         struct drm_i915_private *dev_priv = dev->dev_private;
688         struct drm_crtc *crtc;
689         enum pipe pipe;
690
691         mutex_lock(&dev_priv->psr.lock);
692         if (!dev_priv->psr.enabled) {
693                 mutex_unlock(&dev_priv->psr.lock);
694                 return;
695         }
696
697         crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
698         pipe = to_intel_crtc(crtc)->pipe;
699
700         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
701         dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
702
703         if (frontbuffer_bits)
704                 intel_psr_exit(dev);
705
706         mutex_unlock(&dev_priv->psr.lock);
707 }
708
709 /**
710  * intel_psr_flush - Flush PSR
711  * @dev: DRM device
712  * @frontbuffer_bits: frontbuffer plane tracking bits
713  * @origin: which operation caused the flush
714  *
715  * Since the hardware frontbuffer tracking has gaps we need to integrate
716  * with the software frontbuffer tracking. This function gets called every
717  * time frontbuffer rendering has completed and flushed out to memory. PSR
718  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
719  *
720  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
721  */
722 void intel_psr_flush(struct drm_device *dev,
723                      unsigned frontbuffer_bits, enum fb_op_origin origin)
724 {
725         struct drm_i915_private *dev_priv = dev->dev_private;
726         struct drm_crtc *crtc;
727         enum pipe pipe;
728
729         mutex_lock(&dev_priv->psr.lock);
730         if (!dev_priv->psr.enabled) {
731                 mutex_unlock(&dev_priv->psr.lock);
732                 return;
733         }
734
735         crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
736         pipe = to_intel_crtc(crtc)->pipe;
737
738         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
739         dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
740
741         /* By definition flush = invalidate + flush */
742         if (frontbuffer_bits)
743                 intel_psr_exit(dev);
744
745         if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
746                 if (!work_busy(&dev_priv->psr.work.work))
747                         schedule_delayed_work(&dev_priv->psr.work,
748                                               msecs_to_jiffies(100));
749         mutex_unlock(&dev_priv->psr.lock);
750 }
751
752 /**
753  * intel_psr_init - Init basic PSR work and mutex.
754  * @dev: DRM device
755  *
756  * This function is called only once at driver load to initialize basic
757  * PSR stuff.
758  */
759 void intel_psr_init(struct drm_device *dev)
760 {
761         struct drm_i915_private *dev_priv = dev->dev_private;
762
763         dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
764                 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
765
766         INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
767         mutex_init(&dev_priv->psr.lock);
768 }