drm/i915: get a runtime PM ref for the deferred GT powersave enabling
[cascardo/linux.git] / drivers / gpu / drm / i915 / i915_drv.c
1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29
30 #include <linux/device.h>
31 #include <drm/drmP.h>
32 #include <drm/i915_drm.h>
33 #include "i915_drv.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 #include <linux/console.h>
38 #include <linux/module.h>
39 #include <drm/drm_crtc_helper.h>
40
41 static struct drm_driver driver;
42
/*
 * Default per-pipe, per-transcoder, DPLL and palette register offsets
 * shared by all of the platform descriptors below.  Placed first in each
 * initializer so individual platforms could override entries afterwards
 * ("last one wins" for designated initializers).
 */
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
51
52
/*
 * Static feature descriptors, one per platform variant, matched to PCI IDs
 * via the INTEL_PCI_IDS table below.  Fields not set here default to 0.
 */

/* Gen2: i830/845G/85x/865G */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen3: i915/i945 families and G33/Pineview (Pineview below) */
static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen4: i965 (Broadwater/Crestline) and G4x */
static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Pineview reuses the G33 flag but has no ring_mask entry of its own. */
static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen5: Ironlake */
static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen6: Sandybridge — first generation with a BLT ring and LLC */
static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
194
/*
 * Common baseline for all gen7 platforms; individual descriptors override
 * fields afterwards (designated-initializer "last one wins" semantics).
 */
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

/* Gen7: Ivybridge */
static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Quanta transcode minipc: no display pipes at all (PCH_NOP). */
static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen7: Valleyview — no FBC, no LLC, display block at VLV_DISPLAY_BASE */
static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen7.5: Haswell — adds DDI ports, FPGA debug and the VEBOX ring */
static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen8: Broadwell; GT3 variants additionally expose a second BSD ring */
static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
301
/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info),	\
	INTEL_G45_IDS(&intel_g45_info),		\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info),	\
	INTEL_HSW_M_IDS(&intel_haswell_m_info),	\
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info)

/* Zero-terminated PCI match table used by the PCI driver core. */
static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

/* Only advertise the ID table for autoloading when built for KMS. */
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
347
/**
 * intel_detect_pch - identify which PCH (south bridge) is paired with the GPU
 * @dev: drm device
 *
 * Walks the ISA-bridge class PCI devices and records the detected PCH
 * generation in dev_priv->pch_type and its device id in dev_priv->pch_id.
 */
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				/* NOTE(review): this branch tests the GPU, not
				 * the bridge id, and sits before the LPT_LP id
				 * check below, so on Broadwell any unmatched
				 * Intel ISA bridge lands here — confirm that
				 * ordering is intended. */
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else
				continue;

			/* Found a match: keep the reference for the final
			 * pci_dev_put() below and stop scanning. */
			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	/* pci_get_class() returns a referenced device; drop that ref. */
	pci_dev_put(pch);
}
417
418 bool i915_semaphore_is_enabled(struct drm_device *dev)
419 {
420         if (INTEL_INFO(dev)->gen < 6)
421                 return false;
422
423         if (i915.semaphores >= 0)
424                 return i915.semaphores;
425
426         /* Until we get further testing... */
427         if (IS_GEN8(dev))
428                 return false;
429
430 #ifdef CONFIG_INTEL_IOMMU
431         /* Enable semaphores on SNB when IO remapping is off */
432         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
433                 return false;
434 #endif
435
436         return true;
437 }
438
439 static int i915_drm_freeze(struct drm_device *dev)
440 {
441         struct drm_i915_private *dev_priv = dev->dev_private;
442         struct drm_crtc *crtc;
443
444         intel_runtime_pm_get(dev_priv);
445
446         /* ignore lid events during suspend */
447         mutex_lock(&dev_priv->modeset_restore_lock);
448         dev_priv->modeset_restore = MODESET_SUSPENDED;
449         mutex_unlock(&dev_priv->modeset_restore_lock);
450
451         /* We do a lot of poking in a lot of registers, make sure they work
452          * properly. */
453         intel_display_set_init_power(dev_priv, true);
454
455         drm_kms_helper_poll_disable(dev);
456
457         pci_save_state(dev->pdev);
458
459         /* If KMS is active, we do the leavevt stuff here */
460         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
461                 int error;
462
463                 error = i915_gem_suspend(dev);
464                 if (error) {
465                         dev_err(&dev->pdev->dev,
466                                 "GEM idle failed, resume might fail\n");
467                         return error;
468                 }
469
470                 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
471
472                 drm_irq_uninstall(dev);
473                 dev_priv->enable_hotplug_processing = false;
474                 /*
475                  * Disable CRTCs directly since we want to preserve sw state
476                  * for _thaw.
477                  */
478                 mutex_lock(&dev->mode_config.mutex);
479                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
480                         dev_priv->display.crtc_disable(crtc);
481                 mutex_unlock(&dev->mode_config.mutex);
482
483                 intel_modeset_suspend_hw(dev);
484         }
485
486         i915_gem_suspend_gtt_mappings(dev);
487
488         i915_save_state(dev);
489
490         intel_opregion_fini(dev);
491         intel_uncore_fini(dev);
492
493         console_lock();
494         intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
495         console_unlock();
496
497         dev_priv->suspend_count++;
498
499         return 0;
500 }
501
502 int i915_suspend(struct drm_device *dev, pm_message_t state)
503 {
504         int error;
505
506         if (!dev || !dev->dev_private) {
507                 DRM_ERROR("dev: %p\n", dev);
508                 DRM_ERROR("DRM not initialized, aborting suspend.\n");
509                 return -ENODEV;
510         }
511
512         if (state.event == PM_EVENT_PRETHAW)
513                 return 0;
514
515
516         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
517                 return 0;
518
519         error = i915_drm_freeze(dev);
520         if (error)
521                 return error;
522
523         if (state.event == PM_EVENT_SUSPEND) {
524                 /* Shut down the device */
525                 pci_disable_device(dev->pdev);
526                 pci_set_power_state(dev->pdev, PCI_D3hot);
527         }
528
529         return 0;
530 }
531
532 void intel_console_resume(struct work_struct *work)
533 {
534         struct drm_i915_private *dev_priv =
535                 container_of(work, struct drm_i915_private,
536                              console_resume_work);
537         struct drm_device *dev = dev_priv->dev;
538
539         console_lock();
540         intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
541         console_unlock();
542 }
543
544 static void intel_resume_hotplug(struct drm_device *dev)
545 {
546         struct drm_mode_config *mode_config = &dev->mode_config;
547         struct intel_encoder *encoder;
548
549         mutex_lock(&mode_config->mutex);
550         DRM_DEBUG_KMS("running encoder hotplug functions\n");
551
552         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
553                 if (encoder->hot_plug)
554                         encoder->hot_plug(encoder);
555
556         mutex_unlock(&mode_config->mutex);
557
558         /* Just fire off a uevent and let userspace tell us what to do */
559         drm_helper_hpd_irq_event(dev);
560 }
561
/*
 * Early thaw: bring the uncore (register access) machinery and the power
 * domains back into a sane state before the main thaw path touches any
 * hardware.  Always succeeds.
 */
static int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_early_sanitize(dev);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return 0;
}
572
/*
 * Core thaw/resume path: restores GTT mappings (optionally), register
 * state, re-initializes GEM/modeset hardware and re-enables hotplug.
 * Releases the runtime PM reference taken in i915_drm_freeze().
 *
 * @restore_gtt_mappings: rewrite all GTT PTEs; needed when the BIOS may
 *	have scribbled over them (see i915_resume()).
 */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		/* A failed GPU re-init is not fatal for resume itself, but
		 * mark the GPU as wedged so nothing tries to use it. */
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev, dev->pdev->irq);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might loose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contented on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* Balances the intel_runtime_pm_get() in i915_drm_freeze(). */
	intel_runtime_pm_put(dev_priv);
	return 0;
}
641
642 static int i915_drm_thaw(struct drm_device *dev)
643 {
644         if (drm_core_check_feature(dev, DRIVER_MODESET))
645                 i915_check_and_clear_faults(dev);
646
647         return __i915_drm_thaw(dev, true);
648 }
649
650 static int i915_resume_early(struct drm_device *dev)
651 {
652         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
653                 return 0;
654
655         /*
656          * We have a resume ordering issue with the snd-hda driver also
657          * requiring our device to be power up. Due to the lack of a
658          * parent/child relationship we currently solve this with an early
659          * resume hook.
660          *
661          * FIXME: This should be solved with a special hdmi sink device or
662          * similar so that power domains can be employed.
663          */
664         if (pci_enable_device(dev->pdev))
665                 return -EIO;
666
667         pci_set_master(dev->pdev);
668
669         return i915_drm_thaw_early(dev);
670 }
671
672 int i915_resume(struct drm_device *dev)
673 {
674         struct drm_i915_private *dev_priv = dev->dev_private;
675         int ret;
676
677         /*
678          * Platforms with opregion should have sane BIOS, older ones (gen3 and
679          * earlier) need to restore the GTT mappings since the BIOS might clear
680          * all our scratch PTEs.
681          */
682         ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
683         if (ret)
684                 return ret;
685
686         drm_kms_helper_poll_enable(dev);
687         return 0;
688 }
689
/*
 * Legacy resume entry point: early resume followed by the main resume.
 *
 * Fix: propagate failures from both stages instead of discarding them —
 * the previous code ignored both return values and always reported 0,
 * so a failed resume went unnoticed by the caller.
 */
static int i915_resume_legacy(struct drm_device *dev)
{
	int ret;

	ret = i915_resume_early(dev);
	if (ret)
		return ret;

	return i915_resume(dev);
}
697
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	/* Resets disabled via module parameter. */
	if (!i915.reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	/* Non-zero stop_rings means this hang was injected for testing. */
	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		/* struct_mutex must be dropped before the irq reinstall
		 * below, hence the unlock directly after init_hw. */
		ret = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		/*
		 * FIXME: This is horribly race against concurrent pageflip and
		 * vblank wait ioctls since they can observe dev->irqs_disabled
		 * being false when they shouldn't be able to.
		 */
		drm_irq_uninstall(dev);
		drm_irq_install(dev, dev->pdev->irq);

		/* rps/rc6 re-init is necessary to restore state lost after the
		 * reset and the re-install of drm irq. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset. */
		if (INTEL_INFO(dev)->gen > 5)
			intel_reset_gt_powersave(dev);

		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}
794
795 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
796 {
797         struct intel_device_info *intel_info =
798                 (struct intel_device_info *) ent->driver_data;
799
800         if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
801                 DRM_INFO("This hardware requires preliminary hardware support.\n"
802                          "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
803                 return -ENODEV;
804         }
805
806         /* Only bind to function 0 of the device. Early generations
807          * used function 1 as a placeholder for multi-head. This causes
808          * us confusion instead, especially on the systems where both
809          * functions have the same PCI-ID!
810          */
811         if (PCI_FUNC(pdev->devfn))
812                 return -ENODEV;
813
814         driver.driver_features &= ~(DRIVER_USE_AGP);
815
816         return drm_get_pci_dev(pdev, ent, &driver);
817 }
818
/* PCI remove callback: unregister and release the associated drm device. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
826
827 static int i915_pm_suspend(struct device *dev)
828 {
829         struct pci_dev *pdev = to_pci_dev(dev);
830         struct drm_device *drm_dev = pci_get_drvdata(pdev);
831
832         if (!drm_dev || !drm_dev->dev_private) {
833                 dev_err(dev, "DRM not initialized, aborting suspend.\n");
834                 return -ENODEV;
835         }
836
837         if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
838                 return 0;
839
840         return i915_drm_freeze(drm_dev);
841 }
842
/*
 * Late system suspend callback: put the PCI device into D3hot. Done late
 * (rather than in i915_pm_suspend()) because of the ordering issue with
 * snd-hda described below.
 */
static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
865
/* Early system resume callback: undo i915_pm_suspend_late(). */
static int i915_pm_resume_early(struct device *dev)
{
	return i915_resume_early(pci_get_drvdata(to_pci_dev(dev)));
}
873
/* System resume callback: full resume via i915_resume(). */
static int i915_pm_resume(struct device *dev)
{
	return i915_resume(pci_get_drvdata(to_pci_dev(dev)));
}
881
/*
 * Hibernation freeze callback. Unlike i915_pm_suspend() there is no
 * vga_switcheroo power-state short-circuit here — only the "DRM not
 * initialized" guard before freezing.
 */
static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}
894
/* Early hibernation thaw callback. */
static int i915_pm_thaw_early(struct device *dev)
{
	return i915_drm_thaw_early(pci_get_drvdata(to_pci_dev(dev)));
}
902
/* Hibernation thaw callback. */
static int i915_pm_thaw(struct device *dev)
{
	return i915_drm_thaw(pci_get_drvdata(to_pci_dev(dev)));
}
910
911 static int i915_pm_poweroff(struct device *dev)
912 {
913         struct pci_dev *pdev = to_pci_dev(dev);
914         struct drm_device *drm_dev = pci_get_drvdata(pdev);
915
916         return i915_drm_freeze(drm_dev);
917 }
918
919 static void snb_runtime_suspend(struct drm_i915_private *dev_priv)
920 {
921         struct drm_device *dev = dev_priv->dev;
922
923         intel_runtime_pm_disable_interrupts(dev);
924 }
925
/* HSW/BDW runtime-suspend step: enter Package C8. */
static void hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);
}
930
/*
 * GEN6 (SNB) runtime-resume step: rebuild state not retained while
 * runtime suspended — interrupts, the PCH reference clock, swizzle
 * configuration and the ring/CPU frequency table.
 */
static void snb_runtime_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_runtime_pm_restore_interrupts(dev);
	intel_init_pch_refclk(dev);
	i915_gem_init_swizzling(dev);
	/* gen6_update_ring_freq() is called under rps.hw_lock. */
	mutex_lock(&dev_priv->rps.hw_lock);
	gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
942
/* HSW/BDW runtime-resume step: leave Package C8. */
static void hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
	hsw_disable_pc8(dev_priv);
}
947
/*
 * Runtime PM suspend callback: quiesce the GPU via the platform-specific
 * helper, drop userspace GTT mmaps, stop hangcheck and notify the BIOS
 * opregion. Returns 0 on success, -ENODEV if GT powersave is not enabled.
 */
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * Refuse to runtime suspend before RPS/RC6 has been enabled.
	 * NOTE(review): the deferred GT powersave enabling is expected to
	 * hold a runtime PM ref so this shouldn't trigger — confirm against
	 * the powersave-enable work in intel_pm.c.
	 */
	if (WARN_ON_ONCE(!dev_priv->rps.enabled))
		return -ENODEV;

	WARN_ON(!HAS_RUNTIME_PM(dev));
	/* All forcewake references must have been dropped by now. */
	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/* Platform-specific suspend sequence. */
	if (IS_GEN6(dev))
		snb_runtime_suspend(dev_priv);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_runtime_suspend(dev_priv);
	else
		WARN_ON(1);

	/* Force GTT mmaps to re-fault once we're powered down. */
	i915_gem_release_all_mmaps(dev_priv);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
986
/*
 * Runtime PM resume callback: mirror of intel_runtime_suspend(). Notify
 * the opregion of D0, clear the suspended flag, then run the
 * platform-specific resume sequence.
 */
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	/* Platform-specific resume sequence. */
	if (IS_GEN6(dev))
		snb_runtime_resume(dev_priv);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_runtime_resume(dev_priv);
	else
		WARN_ON(1);

	DRM_DEBUG_KMS("Device resumed\n");
	return 0;
}
1010
/*
 * Power-management dispatch table, referenced from i915_pci_driver below.
 * Covers system suspend/resume (S3), hibernation (freeze/thaw/poweroff/
 * restore) and runtime PM.
 */
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	/* Hibernation restore reuses the resume path. */
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
1025
/* VMA callbacks for GEM object mmaps; only the fault handler is ours. */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
1031
/* File operations for /dev/dri nodes; all standard DRM helpers. */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	/* 32-bit ioctl translation for 64-bit kernels. */
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
1045
/*
 * Main DRM driver description. driver_features is adjusted at runtime:
 * i915_init() may set DRIVER_MODESET, i915_pci_probe() clears
 * DRIVER_USE_AGP.
 */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	/* PRIME (dma-buf) import/export support. */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	/* Dumb buffers for unaccelerated scanout (e.g. fbdev, plymouth). */
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
1092
/* PCI driver glue: registered via drm_pci_init() in i915_init(). */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
1100
/*
 * Module init: decide between KMS and UMS based on config, the
 * i915.modeset parameter and vga_text_mode_force, then register the
 * PCI driver. Returns 0 even when loading is silently skipped (UMS
 * disabled without CONFIG_DRM_I915_UMS).
 */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	/* modeset == -1 means "auto"; defer to the VGA console override. */
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* UMS cannot provide vblank timestamps. */
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}
1136
/*
 * Module exit: unregister the PCI driver, unless i915_init() silently
 * skipped registration (UMS requested without CONFIG_DRM_I915_UMS).
 */
static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}
1146
/* Module entry points and metadata. */
module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");