drivers/gpu/drm/i915/i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <drm/drmP.h>
32 #include <drm/drm_crtc_helper.h>
33 #include <drm/drm_fb_helper.h>
34 #include <drm/drm_legacy.h>
35 #include "intel_drv.h"
36 #include <drm/i915_drm.h>
37 #include "i915_drv.h"
38 #include "i915_vgpu.h"
39 #include "i915_trace.h"
40 #include <linux/pci.h>
41 #include <linux/console.h>
42 #include <linux/vt.h>
43 #include <linux/vgaarb.h>
44 #include <linux/acpi.h>
45 #include <linux/pnp.h>
46 #include <linux/vga_switcheroo.h>
47 #include <linux/slab.h>
48 #include <acpi/video.h>
49 #include <linux/pm.h>
50 #include <linux/pm_runtime.h>
51 #include <linux/oom.h>
52
53 static unsigned int i915_load_fail_count;
54
55 bool __i915_inject_load_failure(const char *func, int line)
56 {
57         if (i915_load_fail_count >= i915.inject_load_failure)
58                 return false;
59
60         if (++i915_load_fail_count == i915.inject_load_failure) {
61                 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
62                          i915.inject_load_failure, func, line);
63                 return true;
64         }
65
66         return false;
67 }
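
/*
 * Usage sketch for the checkpoint counter above (the option spelling is
 * inferred from i915.inject_load_failure and is illustrative only):
 *
 *   # make the 3rd load-time checkpoint fail
 *   modprobe i915 inject_load_failure=3
 *
 * Each init phase below calls i915_inject_load_failure(), presumably a thin
 * wrapper feeding __func__/__LINE__ into __i915_inject_load_failure(). The
 * Nth such call returns true exactly once, so that phase bails out with
 * -ENODEV and its error-unwind path gets exercised.
 */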
68
69 #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
70 #define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
71                     "providing the dmesg log by booting with drm.debug=0xf"
72
73 void
74 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
75               const char *fmt, ...)
76 {
77         static bool shown_bug_once;
78         struct device *dev = dev_priv->dev->dev;
79         bool is_error = level[1] <= KERN_ERR[1];
80         bool is_debug = level[1] == KERN_DEBUG[1];
81         struct va_format vaf;
82         va_list args;
83
84         if (is_debug && !(drm_debug & DRM_UT_DRIVER))
85                 return;
86
87         va_start(args, fmt);
88
89         vaf.fmt = fmt;
90         vaf.va = &args;
91
92         dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
93                    __builtin_return_address(0), &vaf);
94
95         if (is_error && !shown_bug_once) {
96                 dev_notice(dev, "%s", FDO_BUG_MSG);
97                 shown_bug_once = true;
98         }
99
100         va_end(args);
101 }
102
103 static bool i915_error_injected(struct drm_i915_private *dev_priv)
104 {
105         return i915.inject_load_failure &&
106                i915_load_fail_count == i915.inject_load_failure;
107 }
108
109 #define i915_load_error(dev_priv, fmt, ...)                                  \
110         __i915_printk(dev_priv,                                              \
111                       i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
112                       fmt, ##__VA_ARGS__)
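
/*
 * How this plays out in practice: the load path at the bottom of this file
 * reports a fatal probe error as
 *
 *   i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
 *
 * which __i915_printk() emits at KERN_ERR together with a one-time
 * file-a-bug notice. If the failure was injected deliberately through
 * i915.inject_load_failure, i915_error_injected() is true and the same
 * message is demoted to KERN_DEBUG (and thus further gated on drm.debug
 * having the DRIVER bit set), keeping intentional CI failures out of the
 * error log.
 */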
113
114 static int i915_getparam(struct drm_device *dev, void *data,
115                          struct drm_file *file_priv)
116 {
117         struct drm_i915_private *dev_priv = dev->dev_private;
118         drm_i915_getparam_t *param = data;
119         int value;
120
121         switch (param->param) {
122         case I915_PARAM_IRQ_ACTIVE:
123         case I915_PARAM_ALLOW_BATCHBUFFER:
124         case I915_PARAM_LAST_DISPATCH:
125                 /* Reject all old ums/dri params. */
126                 return -ENODEV;
127         case I915_PARAM_CHIPSET_ID:
128                 value = dev->pdev->device;
129                 break;
130         case I915_PARAM_REVISION:
131                 value = dev->pdev->revision;
132                 break;
133         case I915_PARAM_HAS_GEM:
134                 value = 1;
135                 break;
136         case I915_PARAM_NUM_FENCES_AVAIL:
137                 value = dev_priv->num_fence_regs;
138                 break;
139         case I915_PARAM_HAS_OVERLAY:
140                 value = dev_priv->overlay ? 1 : 0;
141                 break;
142         case I915_PARAM_HAS_PAGEFLIPPING:
143                 value = 1;
144                 break;
145         case I915_PARAM_HAS_EXECBUF2:
146                 /* depends on GEM */
147                 value = 1;
148                 break;
149         case I915_PARAM_HAS_BSD:
150                 value = intel_engine_initialized(&dev_priv->engine[VCS]);
151                 break;
152         case I915_PARAM_HAS_BLT:
153                 value = intel_engine_initialized(&dev_priv->engine[BCS]);
154                 break;
155         case I915_PARAM_HAS_VEBOX:
156                 value = intel_engine_initialized(&dev_priv->engine[VECS]);
157                 break;
158         case I915_PARAM_HAS_BSD2:
159                 value = intel_engine_initialized(&dev_priv->engine[VCS2]);
160                 break;
161         case I915_PARAM_HAS_RELAXED_FENCING:
162                 value = 1;
163                 break;
164         case I915_PARAM_HAS_COHERENT_RINGS:
165                 value = 1;
166                 break;
167         case I915_PARAM_HAS_EXEC_CONSTANTS:
168                 value = INTEL_INFO(dev)->gen >= 4;
169                 break;
170         case I915_PARAM_HAS_RELAXED_DELTA:
171                 value = 1;
172                 break;
173         case I915_PARAM_HAS_GEN7_SOL_RESET:
174                 value = 1;
175                 break;
176         case I915_PARAM_HAS_LLC:
177                 value = HAS_LLC(dev);
178                 break;
179         case I915_PARAM_HAS_WT:
180                 value = HAS_WT(dev);
181                 break;
182         case I915_PARAM_HAS_ALIASING_PPGTT:
183                 value = USES_PPGTT(dev);
184                 break;
185         case I915_PARAM_HAS_WAIT_TIMEOUT:
186                 value = 1;
187                 break;
188         case I915_PARAM_HAS_SEMAPHORES:
189                 value = i915_semaphore_is_enabled(dev_priv);
190                 break;
191         case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
192                 value = 1;
193                 break;
194         case I915_PARAM_HAS_SECURE_BATCHES:
195                 value = capable(CAP_SYS_ADMIN);
196                 break;
197         case I915_PARAM_HAS_PINNED_BATCHES:
198                 value = 1;
199                 break;
200         case I915_PARAM_HAS_EXEC_NO_RELOC:
201                 value = 1;
202                 break;
203         case I915_PARAM_HAS_EXEC_HANDLE_LUT:
204                 value = 1;
205                 break;
206         case I915_PARAM_CMD_PARSER_VERSION:
207                 value = i915_cmd_parser_get_version(dev_priv);
208                 break;
209         case I915_PARAM_HAS_COHERENT_PHYS_GTT:
210                 value = 1;
211                 break;
212         case I915_PARAM_MMAP_VERSION:
213                 value = 1;
214                 break;
215         case I915_PARAM_SUBSLICE_TOTAL:
216                 value = INTEL_INFO(dev)->subslice_total;
217                 if (!value)
218                         return -ENODEV;
219                 break;
220         case I915_PARAM_EU_TOTAL:
221                 value = INTEL_INFO(dev)->eu_total;
222                 if (!value)
223                         return -ENODEV;
224                 break;
225         case I915_PARAM_HAS_GPU_RESET:
226                 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
227                 break;
228         case I915_PARAM_HAS_RESOURCE_STREAMER:
229                 value = HAS_RESOURCE_STREAMER(dev);
230                 break;
231         case I915_PARAM_HAS_EXEC_SOFTPIN:
232                 value = 1;
233                 break;
234         default:
235                 DRM_DEBUG("Unknown parameter %d\n", param->param);
236                 return -EINVAL;
237         }
238
239         if (copy_to_user(param->value, &value, sizeof(int))) {
240                 DRM_ERROR("copy_to_user failed\n");
241                 return -EFAULT;
242         }
243
244         return 0;
245 }
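
/*
 * Userspace view, as a minimal sketch (assumes the usual libdrm drmIoctl()
 * wrapper and the uapi definitions from i915_drm.h; not code from this
 * tree):
 *
 *   int id = 0;
 *   drm_i915_getparam_t gp = {
 *           .param = I915_PARAM_CHIPSET_ID,
 *           .value = &id,
 *   };
 *   if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *           printf("PCI device id: 0x%04x\n", id);
 *
 * Unknown parameters come back as -EINVAL and the retired UMS/DRI1 ones as
 * -ENODEV, matching the switch statement above.
 */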
246
247 static int i915_get_bridge_dev(struct drm_device *dev)
248 {
249         struct drm_i915_private *dev_priv = dev->dev_private;
250
251         dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
252         if (!dev_priv->bridge_dev) {
253                 DRM_ERROR("bridge device not found\n");
254                 return -1;
255         }
256         return 0;
257 }
258
259 /* Allocate space for the MCH regs if needed, return nonzero on error */
260 static int
261 intel_alloc_mchbar_resource(struct drm_device *dev)
262 {
263         struct drm_i915_private *dev_priv = dev->dev_private;
264         int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
265         u32 temp_lo, temp_hi = 0;
266         u64 mchbar_addr;
267         int ret;
268
269         if (INTEL_INFO(dev)->gen >= 4)
270                 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
271         pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
272         mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
273
274         /* If ACPI doesn't have it, assume we need to allocate it ourselves */
275 #ifdef CONFIG_PNP
276         if (mchbar_addr &&
277             pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
278                 return 0;
279 #endif
280
281         /* Get some space for it */
282         dev_priv->mch_res.name = "i915 MCHBAR";
283         dev_priv->mch_res.flags = IORESOURCE_MEM;
284         ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
285                                      &dev_priv->mch_res,
286                                      MCHBAR_SIZE, MCHBAR_SIZE,
287                                      PCIBIOS_MIN_MEM,
288                                      0, pcibios_align_resource,
289                                      dev_priv->bridge_dev);
290         if (ret) {
291                 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
292                 dev_priv->mch_res.start = 0;
293                 return ret;
294         }
295
296         if (INTEL_INFO(dev)->gen >= 4)
297                 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
298                                        upper_32_bits(dev_priv->mch_res.start));
299
300         pci_write_config_dword(dev_priv->bridge_dev, reg,
301                                lower_32_bits(dev_priv->mch_res.start));
302         return 0;
303 }
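
/*
 * Note on the register layout handled above: on gen4+ MCHBAR lives in the
 * host bridge's config space as a 64-bit value (low dword at 'reg', high
 * dword at 'reg' + 4), while older chipsets only implement the 32-bit low
 * half, which is why the high dword is only read and written back when
 * gen >= 4.
 */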
304
305 /* Setup MCHBAR if possible; set mchbar_need_disable if we should disable it again */
306 static void
307 intel_setup_mchbar(struct drm_device *dev)
308 {
309         struct drm_i915_private *dev_priv = dev->dev_private;
310         int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
311         u32 temp;
312         bool enabled;
313
314         if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
315                 return;
316
317         dev_priv->mchbar_need_disable = false;
318
319         if (IS_I915G(dev) || IS_I915GM(dev)) {
320                 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
321                 enabled = !!(temp & DEVEN_MCHBAR_EN);
322         } else {
323                 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
324                 enabled = temp & 1;
325         }
326
327         /* If it's already enabled, don't have to do anything */
328         if (enabled)
329                 return;
330
331         if (intel_alloc_mchbar_resource(dev))
332                 return;
333
334         dev_priv->mchbar_need_disable = true;
335
336         /* Space is allocated or reserved, so enable it. */
337         if (IS_I915G(dev) || IS_I915GM(dev)) {
338                 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
339                                        temp | DEVEN_MCHBAR_EN);
340         } else {
341                 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
342                 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
343         }
344 }
345
346 static void
347 intel_teardown_mchbar(struct drm_device *dev)
348 {
349         struct drm_i915_private *dev_priv = dev->dev_private;
350         int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
351
352         if (dev_priv->mchbar_need_disable) {
353                 if (IS_I915G(dev) || IS_I915GM(dev)) {
354                         u32 deven_val;
355
356                         pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
357                                               &deven_val);
358                         deven_val &= ~DEVEN_MCHBAR_EN;
359                         pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
360                                                deven_val);
361                 } else {
362                         u32 mchbar_val;
363
364                         pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
365                                               &mchbar_val);
366                         mchbar_val &= ~1;
367                         pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
368                                                mchbar_val);
369                 }
370         }
371
372         if (dev_priv->mch_res.start)
373                 release_resource(&dev_priv->mch_res);
374 }
375
376 /* true = enable decode, false = disable decode */
377 static unsigned int i915_vga_set_decode(void *cookie, bool state)
378 {
379         struct drm_device *dev = cookie;
380
381         intel_modeset_vga_set_state(dev, state);
382         if (state)
383                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
384                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
385         else
386                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
387 }
388
389 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
390 {
391         struct drm_device *dev = pci_get_drvdata(pdev);
392         pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
393
394         if (state == VGA_SWITCHEROO_ON) {
395                 pr_info("switched on\n");
396                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
397                 /* i915 resume handler doesn't set to D0 */
398                 pci_set_power_state(dev->pdev, PCI_D0);
399                 i915_resume_switcheroo(dev);
400                 dev->switch_power_state = DRM_SWITCH_POWER_ON;
401         } else {
402                 pr_info("switched off\n");
403                 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
404                 i915_suspend_switcheroo(dev, pmm);
405                 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
406         }
407 }
408
409 static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
410 {
411         struct drm_device *dev = pci_get_drvdata(pdev);
412
413         /*
414          * FIXME: open_count is protected by drm_global_mutex but that would lead to
415          * locking inversion with the driver load path. And the access here is
416          * completely racy anyway. So don't bother with locking for now.
417          */
418         return dev->open_count == 0;
419 }
420
421 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
422         .set_gpu_state = i915_switcheroo_set_state,
423         .reprobe = NULL,
424         .can_switch = i915_switcheroo_can_switch,
425 };
426
427 static void i915_gem_fini(struct drm_device *dev)
428 {
429         struct drm_i915_private *dev_priv = to_i915(dev);
430
431         /*
432          * Neither the BIOS, ourselves nor any other kernel
433          * expects the system to be in execlists mode on startup,
434          * so we need to reset the GPU back to legacy mode. And the only
435          * known way to disable logical contexts is through a GPU reset.
436          *
437          * So in order to leave the system in a known default configuration,
438          * always reset the GPU upon unload. Afterwards we clean up the
439          * GEM state tracking, flushing off the requests and leaving the
440          * system in a known idle state.
441          *
442          * Note that it is of the utmost importance that the GPU is idle and
443          * all stray writes are flushed *before* we dismantle the backing
444          * storage for the pinned objects.
445          *
446          * However, since we are uncertain that resetting the GPU on older
447          * machines is a good idea, we don't - just in case it leaves the
448          * machine in an unusable condition.
449          */
450         if (HAS_HW_CONTEXTS(dev)) {
451                 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
452                 WARN_ON(reset && reset != -ENODEV);
453         }
454
455         mutex_lock(&dev->struct_mutex);
456         i915_gem_reset(dev);
457         i915_gem_cleanup_engines(dev);
458         i915_gem_context_fini(dev);
459         mutex_unlock(&dev->struct_mutex);
460
461         WARN_ON(!list_empty(&to_i915(dev)->context_list));
462 }
463
464 static int i915_load_modeset_init(struct drm_device *dev)
465 {
466         struct drm_i915_private *dev_priv = dev->dev_private;
467         int ret;
468
469         if (i915_inject_load_failure())
470                 return -ENODEV;
471
472         ret = intel_bios_init(dev_priv);
473         if (ret)
474                 DRM_INFO("failed to find VBIOS tables\n");
475
476         /* If we have more than one VGA card, we need to arbitrate access
477          * to the common VGA resources.
478          *
479          * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
480          * then we do not take part in VGA arbitration and
481          * vga_client_register() fails with -ENODEV.
482          */
483         ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
484         if (ret && ret != -ENODEV)
485                 goto out;
486
487         intel_register_dsm_handler();
488
489         ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
490         if (ret)
491                 goto cleanup_vga_client;
492
493         /* must happen before intel_power_domains_init_hw() on VLV/CHV */
494         intel_update_rawclk(dev_priv);
495
496         intel_power_domains_init_hw(dev_priv, false);
497
498         intel_csr_ucode_init(dev_priv);
499
500         ret = intel_irq_install(dev_priv);
501         if (ret)
502                 goto cleanup_csr;
503
504         intel_setup_gmbus(dev);
505
506         /* Important: The output setup functions called by modeset_init need
507          * working irqs for e.g. gmbus and dp aux transfers. */
508         intel_modeset_init(dev);
509
510         intel_guc_ucode_init(dev);
511
512         ret = i915_gem_init(dev);
513         if (ret)
514                 goto cleanup_irq;
515
516         intel_modeset_gem_init(dev);
517
518         if (INTEL_INFO(dev)->num_pipes == 0)
519                 return 0;
520
521         ret = intel_fbdev_init(dev);
522         if (ret)
523                 goto cleanup_gem;
524
525         /* Only enable hotplug handling once the fbdev is fully set up. */
526         intel_hpd_init(dev_priv);
527
528         /*
529          * Some ports require correctly set-up hpd registers for detection to
530          * work properly (otherwise we get ghost "connected" connector status),
531          * e.g. VGA on gm45. Hence we can only set up the initial fbdev config
532          * after hpd irqs are fully enabled. Ideally we would scan for the
533          * initial config only once hotplug handling is enabled, but due to
534          * screwed-up locking around kms/fbdev init we can't protect the fbdev
535          * initial config scanning against hotplug events. Hence do this first
536          * and ignore the tiny window where we will lose hotplug notifications.
537          */
538         intel_fbdev_initial_config_async(dev);
539
540         drm_kms_helper_poll_init(dev);
541
542         return 0;
543
544 cleanup_gem:
545         i915_gem_fini(dev);
546 cleanup_irq:
547         intel_guc_ucode_fini(dev);
548         drm_irq_uninstall(dev);
549         intel_teardown_gmbus(dev);
550 cleanup_csr:
551         intel_csr_ucode_fini(dev_priv);
552         intel_power_domains_fini(dev_priv);
553         vga_switcheroo_unregister_client(dev->pdev);
554 cleanup_vga_client:
555         vga_client_register(dev->pdev, NULL, NULL, NULL);
556 out:
557         return ret;
558 }
559
560 #if IS_ENABLED(CONFIG_FB)
561 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
562 {
563         struct apertures_struct *ap;
564         struct pci_dev *pdev = dev_priv->dev->pdev;
565         struct i915_ggtt *ggtt = &dev_priv->ggtt;
566         bool primary;
567         int ret;
568
569         ap = alloc_apertures(1);
570         if (!ap)
571                 return -ENOMEM;
572
573         ap->ranges[0].base = ggtt->mappable_base;
574         ap->ranges[0].size = ggtt->mappable_end;
575
576         primary =
577                 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
578
579         ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
580
581         kfree(ap);
582
583         return ret;
584 }
585 #else
586 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
587 {
588         return 0;
589 }
590 #endif
591
592 #if !defined(CONFIG_VGA_CONSOLE)
593 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
594 {
595         return 0;
596 }
597 #elif !defined(CONFIG_DUMMY_CONSOLE)
598 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
599 {
600         return -ENODEV;
601 }
602 #else
603 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
604 {
605         int ret = 0;
606
607         DRM_INFO("Replacing VGA console driver\n");
608
609         console_lock();
610         if (con_is_bound(&vga_con))
611                 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
612         if (ret == 0) {
613                 ret = do_unregister_con_driver(&vga_con);
614
615                 /* Ignore "already unregistered". */
616                 if (ret == -ENODEV)
617                         ret = 0;
618         }
619         console_unlock();
620
621         return ret;
622 }
623 #endif
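
/*
 * The three build variants above boil down to: without CONFIG_VGA_CONSOLE
 * there is nothing to take over; with vgacon but without CONFIG_DUMMY_CONSOLE
 * there is no console driver left to migrate the VTs to, so we refuse with
 * -ENODEV; otherwise dummycon takes over every VT and vgacon is
 * unregistered, so it no longer touches the legacy VGA ranges underneath
 * the driver.
 */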
624
625 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
626 {
627         const struct intel_device_info *info = &dev_priv->info;
628
629 #define PRINT_S(name) "%s"
630 #define SEP_EMPTY
631 #define PRINT_FLAG(name) info->name ? #name "," : ""
632 #define SEP_COMMA ,
633         DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
634                          DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
635                          info->gen,
636                          dev_priv->dev->pdev->device,
637                          dev_priv->dev->pdev->revision,
638                          DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
639 #undef PRINT_S
640 #undef SEP_EMPTY
641 #undef PRINT_FLAG
642 #undef SEP_COMMA
643 }
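
/*
 * To make the macro dance above less opaque: with only two capability
 * flags, say FOO and BAR (placeholder names, the real list comes from
 * DEV_INFO_FOR_EACH_FLAG), the call would expand roughly to
 *
 *   DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x "
 *                    "flags=" "%s" "%s",
 *                    info->gen, pdev->device, pdev->revision,
 *                    info->FOO ? "FOO," : "",
 *                    info->BAR ? "BAR," : "");
 *
 * i.e. one "%s" per flag in the format string and one conditional string
 * per flag in the argument list, so only the capabilities this device
 * actually has end up in the log.
 */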
644
645 static void cherryview_sseu_info_init(struct drm_device *dev)
646 {
647         struct drm_i915_private *dev_priv = dev->dev_private;
648         struct intel_device_info *info;
649         u32 fuse, eu_dis;
650
651         info = (struct intel_device_info *)&dev_priv->info;
652         fuse = I915_READ(CHV_FUSE_GT);
653
654         info->slice_total = 1;
655
656         if (!(fuse & CHV_FGT_DISABLE_SS0)) {
657                 info->subslice_per_slice++;
658                 eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
659                                  CHV_FGT_EU_DIS_SS0_R1_MASK);
660                 info->eu_total += 8 - hweight32(eu_dis);
661         }
662
663         if (!(fuse & CHV_FGT_DISABLE_SS1)) {
664                 info->subslice_per_slice++;
665                 eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
666                                  CHV_FGT_EU_DIS_SS1_R1_MASK);
667                 info->eu_total += 8 - hweight32(eu_dis);
668         }
669
670         info->subslice_total = info->subslice_per_slice;
671         /*
672          * CHV is expected to always have a uniform distribution of EUs
673          * across subslices.
674          */
675         info->eu_per_subslice = info->subslice_total ?
676                                 info->eu_total / info->subslice_total :
677                                 0;
678         /*
679          * CHV supports subslice power gating on devices with more than
680          * one subslice, and supports EU power gating on devices with
681          * more than one EU pair per subslice.
682          */
683         info->has_slice_pg = 0;
684         info->has_subslice_pg = (info->subslice_total > 1);
685         info->has_eu_pg = (info->eu_per_subslice > 2);
686 }
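
/*
 * Worked example with a made-up fuse value: if CHV_FUSE_GT reports SS1
 * disabled and two EU-disable bits set for SS0, the code above yields
 * subslice_per_slice = 1, subslice_total = 1, eu_total = 8 - 2 = 6 and
 * eu_per_subslice = 6; subslice power gating is then unavailable (only one
 * subslice) while EU power gating is available (more than one EU pair per
 * subslice).
 */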
687
688 static void gen9_sseu_info_init(struct drm_device *dev)
689 {
690         struct drm_i915_private *dev_priv = dev->dev_private;
691         struct intel_device_info *info;
692         int s_max = 3, ss_max = 4, eu_max = 8;
693         int s, ss;
694         u32 fuse2, s_enable, ss_disable, eu_disable;
695         u8 eu_mask = 0xff;
696
697         info = (struct intel_device_info *)&dev_priv->info;
698         fuse2 = I915_READ(GEN8_FUSE2);
699         s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
700                    GEN8_F2_S_ENA_SHIFT;
701         ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
702                      GEN9_F2_SS_DIS_SHIFT;
703
704         info->slice_total = hweight32(s_enable);
705         /*
706          * The subslice disable field is global, i.e. it applies
707          * to each of the enabled slices.
708          */
709         info->subslice_per_slice = ss_max - hweight32(ss_disable);
710         info->subslice_total = info->slice_total *
711                                info->subslice_per_slice;
712
713         /*
714          * Iterate through enabled slices and subslices to
715          * count the total enabled EU.
716          */
717         for (s = 0; s < s_max; s++) {
718                 if (!(s_enable & (0x1 << s)))
719                         /* skip disabled slice */
720                         continue;
721
722                 eu_disable = I915_READ(GEN9_EU_DISABLE(s));
723                 for (ss = 0; ss < ss_max; ss++) {
724                         int eu_per_ss;
725
726                         if (ss_disable & (0x1 << ss))
727                                 /* skip disabled subslice */
728                                 continue;
729
730                         eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
731                                                       eu_mask);
732
733                         /*
734                          * Record which subslice(s) have 7 EUs. We
735                          * can tune the hash used to spread work among
736                          * subslices if they are unbalanced.
737                          */
738                         if (eu_per_ss == 7)
739                                 info->subslice_7eu[s] |= 1 << ss;
740
741                         info->eu_total += eu_per_ss;
742                 }
743         }
744
745         /*
746          * SKL is expected to always have a uniform distribution
747          * of EU across subslices with the exception that any one
748          * EU in any one subslice may be fused off for die
749          * recovery. BXT is expected to be perfectly uniform in EU
750          * distribution.
751          */
752         info->eu_per_subslice = info->subslice_total ?
753                                 DIV_ROUND_UP(info->eu_total,
754                                              info->subslice_total) : 0;
755         /*
756          * SKL supports slice power gating on devices with more than
757          * one slice, and supports EU power gating on devices with
758          * more than one EU pair per subslice. BXT supports subslice
759          * power gating on devices with more than one subslice, and
760          * supports EU power gating on devices with more than one EU
761          * pair per subslice.
762          */
763         info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
764                                (info->slice_total > 1));
765         info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
766         info->has_eu_pg = (info->eu_per_subslice > 2);
767 }
768
769 static void broadwell_sseu_info_init(struct drm_device *dev)
770 {
771         struct drm_i915_private *dev_priv = dev->dev_private;
772         struct intel_device_info *info;
773         const int s_max = 3, ss_max = 3, eu_max = 8;
774         int s, ss;
775         u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
776
777         fuse2 = I915_READ(GEN8_FUSE2);
778         s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
779         ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
780
781         eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
782         eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
783                         ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
784                          (32 - GEN8_EU_DIS0_S1_SHIFT));
785         eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
786                         ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
787                          (32 - GEN8_EU_DIS1_S2_SHIFT));
788
789
790         info = (struct intel_device_info *)&dev_priv->info;
791         info->slice_total = hweight32(s_enable);
792
793         /*
794          * The subslice disable field is global, i.e. it applies
795          * to each of the enabled slices.
796          */
797         info->subslice_per_slice = ss_max - hweight32(ss_disable);
798         info->subslice_total = info->slice_total * info->subslice_per_slice;
799
800         /*
801          * Iterate through enabled slices and subslices to
802          * count the total enabled EU.
803          */
804         for (s = 0; s < s_max; s++) {
805                 if (!(s_enable & (0x1 << s)))
806                         /* skip disabled slice */
807                         continue;
808
809                 for (ss = 0; ss < ss_max; ss++) {
810                         u32 n_disabled;
811
812                         if (ss_disable & (0x1 << ss))
813                                 /* skip disabled subslice */
814                                 continue;
815
816                         n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
817
818                         /*
819                          * Record which subslices have 7 EUs.
820                          */
821                         if (eu_max - n_disabled == 7)
822                                 info->subslice_7eu[s] |= 1 << ss;
823
824                         info->eu_total += eu_max - n_disabled;
825                 }
826         }
827
828         /*
829          * BDW is expected to always have a uniform distribution of EU across
830          * subslices with the exception that any one EU in any one subslice may
831          * be fused off for die recovery.
832          */
833         info->eu_per_subslice = info->subslice_total ?
834                 DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
835
836         /*
837          * BDW supports slice power gating on devices with more than
838          * one slice.
839          */
840         info->has_slice_pg = (info->slice_total > 1);
841         info->has_subslice_pg = 0;
842         info->has_eu_pg = 0;
843 }
844
845 /*
846  * Determine various intel_device_info fields at runtime.
847  *
848  * Use it when either:
849  *   - it's judged too laborious to encode the limit in n static structures
850  *     when a simple if statement does the job,
851  *   - run-time checks (e.g. reading fuse/strap registers) are needed.
852  *
853  * This function needs to be called:
854  *   - after the MMIO has been setup as we are reading registers,
855  *   - after the PCH has been detected,
856  *   - before the first usage of the fields it can tweak.
857  */
858 static void intel_device_info_runtime_init(struct drm_device *dev)
859 {
860         struct drm_i915_private *dev_priv = dev->dev_private;
861         struct intel_device_info *info;
862         enum pipe pipe;
863
864         info = (struct intel_device_info *)&dev_priv->info;
865
866         /*
867          * Skylake and Broxton currently don't expose the topmost plane as its
868          * use is exclusive with the legacy cursor and we only want to expose
869          * one of those, not both. Until we can safely expose the topmost plane
870          * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
871          * we don't expose the topmost plane at all to prevent ABI breakage
872          * down the line.
873          */
874         if (IS_BROXTON(dev)) {
875                 info->num_sprites[PIPE_A] = 2;
876                 info->num_sprites[PIPE_B] = 2;
877                 info->num_sprites[PIPE_C] = 1;
878         } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
879                 for_each_pipe(dev_priv, pipe)
880                         info->num_sprites[pipe] = 2;
881         else
882                 for_each_pipe(dev_priv, pipe)
883                         info->num_sprites[pipe] = 1;
884
885         if (i915.disable_display) {
886                 DRM_INFO("Display disabled (module parameter)\n");
887                 info->num_pipes = 0;
888         } else if (info->num_pipes > 0 &&
889                    (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
890                    HAS_PCH_SPLIT(dev)) {
891                 u32 fuse_strap = I915_READ(FUSE_STRAP);
892                 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
893
894                 /*
895                  * SFUSE_STRAP is supposed to have a bit signalling the display
896                  * is fused off. Unfortunately it seems that, at least in
897                  * certain cases, fused off display means that PCH display
898                  * reads don't land anywhere. In that case, we read 0s.
899                  *
900                  * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
901                  * should be set when taking over after the firmware.
902                  */
903                 if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
904                     sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
905                     (dev_priv->pch_type == PCH_CPT &&
906                      !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
907                         DRM_INFO("Display fused off, disabling\n");
908                         info->num_pipes = 0;
909                 } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
910                         DRM_INFO("PipeC fused off\n");
911                         info->num_pipes -= 1;
912                 }
913         } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
914                 u32 dfsm = I915_READ(SKL_DFSM);
915                 u8 disabled_mask = 0;
916                 bool invalid;
917                 int num_bits;
918
919                 if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
920                         disabled_mask |= BIT(PIPE_A);
921                 if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
922                         disabled_mask |= BIT(PIPE_B);
923                 if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
924                         disabled_mask |= BIT(PIPE_C);
925
926                 num_bits = hweight8(disabled_mask);
927
928                 switch (disabled_mask) {
929                 case BIT(PIPE_A):
930                 case BIT(PIPE_B):
931                 case BIT(PIPE_A) | BIT(PIPE_B):
932                 case BIT(PIPE_A) | BIT(PIPE_C):
933                         invalid = true;
934                         break;
935                 default:
936                         invalid = false;
937                 }
938
939                 if (num_bits > info->num_pipes || invalid)
940                         DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
941                                   disabled_mask);
942                 else
943                         info->num_pipes -= num_bits;
944         }
945
946         /* Initialize slice/subslice/EU info */
947         if (IS_CHERRYVIEW(dev))
948                 cherryview_sseu_info_init(dev);
949         else if (IS_BROADWELL(dev))
950                 broadwell_sseu_info_init(dev);
951         else if (INTEL_INFO(dev)->gen >= 9)
952                 gen9_sseu_info_init(dev);
953
954         info->has_snoop = !info->has_llc;
955
956         /* Snooping is broken on BXT A stepping. */
957         if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
958                 info->has_snoop = false;
959
960         DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
961         DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
962         DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
963         DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
964         DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
965         DRM_DEBUG_DRIVER("has slice power gating: %s\n",
966                          info->has_slice_pg ? "y" : "n");
967         DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
968                          info->has_subslice_pg ? "y" : "n");
969         DRM_DEBUG_DRIVER("has EU power gating: %s\n",
970                          info->has_eu_pg ? "y" : "n");
971
972         i915.enable_execlists =
973                 intel_sanitize_enable_execlists(dev_priv,
974                                                 i915.enable_execlists);
975
976         /*
977          * i915.enable_ppgtt is read-only, so do an early pass to validate the
978          * user's requested state against the hardware/driver capabilities.  We
979          * do this now so that we can print out any log messages once rather
980          * than every time we check intel_enable_ppgtt().
981          */
982         i915.enable_ppgtt =
983                 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
984         DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
985 }
986
987 static void intel_init_dpio(struct drm_i915_private *dev_priv)
988 {
989         /*
990          * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
991          * CHV x1 PHY (DP/HDMI D)
992          * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
993          */
994         if (IS_CHERRYVIEW(dev_priv)) {
995                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
996                 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
997         } else if (IS_VALLEYVIEW(dev_priv)) {
998                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
999         }
1000 }
1001
1002 static int i915_workqueues_init(struct drm_i915_private *dev_priv)
1003 {
1004         /*
1005          * The i915 workqueue is primarily used for batched retirement of
1006          * requests (and thus managing bo) once the task has been completed
1007          * by the GPU. i915_gem_retire_requests() is called directly when we
1008          * need high-priority retirement, such as waiting for an explicit
1009          * bo.
1010          *
1011          * It is also used for periodic low-priority events, such as
1012          * idle-timers and recording error state.
1013          *
1014          * All tasks on the workqueue are expected to acquire the dev mutex
1015          * so there is no point in running more than one instance of the
1016          * workqueue at any time.  Use an ordered one.
1017          */
1018         dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1019         if (dev_priv->wq == NULL)
1020                 goto out_err;
1021
1022         dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
1023         if (dev_priv->hotplug.dp_wq == NULL)
1024                 goto out_free_wq;
1025
1026         dev_priv->gpu_error.hangcheck_wq =
1027                 alloc_ordered_workqueue("i915-hangcheck", 0);
1028         if (dev_priv->gpu_error.hangcheck_wq == NULL)
1029                 goto out_free_dp_wq;
1030
1031         return 0;
1032
1033 out_free_dp_wq:
1034         destroy_workqueue(dev_priv->hotplug.dp_wq);
1035 out_free_wq:
1036         destroy_workqueue(dev_priv->wq);
1037 out_err:
1038         DRM_ERROR("Failed to allocate workqueues.\n");
1039
1040         return -ENOMEM;
1041 }
1042
1043 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
1044 {
1045         destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
1046         destroy_workqueue(dev_priv->hotplug.dp_wq);
1047         destroy_workqueue(dev_priv->wq);
1048 }
1049
1050 /**
1051  * i915_driver_init_early - setup state not requiring device access
1052  * @dev_priv: device private
1053  *
1054  * Initialize everything that is "SW-only" state, that is, state not
1055  * requiring accessing the device or exposing the driver via kernel internal
1056  * or userspace interfaces. Example steps belonging here: lock initialization,
1057  * system memory allocation, setting up device specific attributes and
1058  * function hooks not requiring accessing the device.
1059  */
1060 static int i915_driver_init_early(struct drm_i915_private *dev_priv,
1061                                   struct drm_device *dev,
1062                                   struct intel_device_info *info)
1063 {
1064         struct intel_device_info *device_info;
1065         int ret = 0;
1066
1067         if (i915_inject_load_failure())
1068                 return -ENODEV;
1069
1070         /* Setup the write-once "constant" device info */
1071         device_info = (struct intel_device_info *)&dev_priv->info;
1072         memcpy(device_info, info, sizeof(dev_priv->info));
1073         device_info->device_id = dev->pdev->device;
1074
1075         BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
1076         device_info->gen_mask = BIT(device_info->gen - 1);
1077
1078         spin_lock_init(&dev_priv->irq_lock);
1079         spin_lock_init(&dev_priv->gpu_error.lock);
1080         mutex_init(&dev_priv->backlight_lock);
1081         spin_lock_init(&dev_priv->uncore.lock);
1082         spin_lock_init(&dev_priv->mm.object_stat_lock);
1083         spin_lock_init(&dev_priv->mmio_flip_lock);
1084         mutex_init(&dev_priv->sb_lock);
1085         mutex_init(&dev_priv->modeset_restore_lock);
1086         mutex_init(&dev_priv->av_mutex);
1087         mutex_init(&dev_priv->wm.wm_mutex);
1088         mutex_init(&dev_priv->pps_mutex);
1089
1090         ret = i915_workqueues_init(dev_priv);
1091         if (ret < 0)
1092                 return ret;
1093
1094         /* This must be called before any calls to HAS_PCH_* */
1095         intel_detect_pch(dev);
1096
1097         intel_pm_setup(dev);
1098         intel_init_dpio(dev_priv);
1099         intel_power_domains_init(dev_priv);
1100         intel_irq_init(dev_priv);
1101         intel_init_display_hooks(dev_priv);
1102         intel_init_clock_gating_hooks(dev_priv);
1103         intel_init_audio_hooks(dev_priv);
1104         i915_gem_load_init(dev);
1105
1106         intel_display_crc_init(dev);
1107
1108         i915_dump_device_info(dev_priv);
1109
1110         /* Not all pre-production machines fall into this category, only the
1111          * very first ones. Almost everything should work, except for maybe
1112          * suspend/resume. And we don't implement workarounds that affect only
1113          * pre-production machines. */
1114         if (IS_HSW_EARLY_SDV(dev))
1115                 DRM_INFO("This is an early pre-production Haswell machine. "
1116                          "It may not be fully functional.\n");
1117
1118         return 0;
1119 }
1120
1121 /**
1122  * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
1123  * @dev_priv: device private
1124  */
1125 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
1126 {
1127         i915_gem_load_cleanup(dev_priv->dev);
1128         i915_workqueues_cleanup(dev_priv);
1129 }
1130
1131 static int i915_mmio_setup(struct drm_device *dev)
1132 {
1133         struct drm_i915_private *dev_priv = to_i915(dev);
1134         int mmio_bar;
1135         int mmio_size;
1136
1137         mmio_bar = IS_GEN2(dev) ? 1 : 0;
1138         /*
1139          * Before gen4, the registers and the GTT are behind different BARs.
1140          * However, from gen4 onwards, the registers and the GTT are shared
1141          * in the same BAR, so we limit this ioremap to avoid clobbering the
1142          * GTT, which we want to map with ioremap_wc instead. Fortunately,
1143          * the register BAR remains the same size for all the earlier
1144          * generations up to Ironlake.
1145          */
1146         if (INTEL_INFO(dev)->gen < 5)
1147                 mmio_size = 512 * 1024;
1148         else
1149                 mmio_size = 2 * 1024 * 1024;
1150         dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1151         if (dev_priv->regs == NULL) {
1152                 DRM_ERROR("failed to map registers\n");
1153
1154                 return -EIO;
1155         }
1156
1157         /* Try to make sure MCHBAR is enabled before poking at it */
1158         intel_setup_mchbar(dev);
1159
1160         return 0;
1161 }
1162
1163 static void i915_mmio_cleanup(struct drm_device *dev)
1164 {
1165         struct drm_i915_private *dev_priv = to_i915(dev);
1166
1167         intel_teardown_mchbar(dev);
1168         pci_iounmap(dev->pdev, dev_priv->regs);
1169 }
1170
1171 /**
1172  * i915_driver_init_mmio - setup device MMIO
1173  * @dev_priv: device private
1174  *
1175  * Setup minimal device state necessary for MMIO accesses later in the
1176  * initialization sequence. The setup here should avoid any other device-wide
1177  * side effects or exposing the driver via kernel internal or user space
1178  * interfaces.
1179  */
1180 static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
1181 {
1182         struct drm_device *dev = dev_priv->dev;
1183         int ret;
1184
1185         if (i915_inject_load_failure())
1186                 return -ENODEV;
1187
1188         if (i915_get_bridge_dev(dev))
1189                 return -EIO;
1190
1191         ret = i915_mmio_setup(dev);
1192         if (ret < 0)
1193                 goto put_bridge;
1194
1195         intel_uncore_init(dev_priv);
1196
1197         return 0;
1198
1199 put_bridge:
1200         pci_dev_put(dev_priv->bridge_dev);
1201
1202         return ret;
1203 }
1204
1205 /**
1206  * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
1207  * @dev_priv: device private
1208  */
1209 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
1210 {
1211         struct drm_device *dev = dev_priv->dev;
1212
1213         intel_uncore_fini(dev_priv);
1214         i915_mmio_cleanup(dev);
1215         pci_dev_put(dev_priv->bridge_dev);
1216 }
1217
1218 /**
1219  * i915_driver_init_hw - setup state requiring device access
1220  * @dev_priv: device private
1221  *
1222  * Setup state that requires accessing the device, but doesn't require
1223  * exposing the driver via kernel internal or userspace interfaces.
1224  */
1225 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1226 {
1227         struct drm_device *dev = dev_priv->dev;
1228         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1229         uint32_t aperture_size;
1230         int ret;
1231
1232         if (i915_inject_load_failure())
1233                 return -ENODEV;
1234
1235         intel_device_info_runtime_init(dev);
1236
1237         ret = i915_ggtt_init_hw(dev);
1238         if (ret)
1239                 return ret;
1240
1241         ret = i915_ggtt_enable_hw(dev);
1242         if (ret) {
1243                 DRM_ERROR("failed to enable GGTT\n");
1244                 goto out_ggtt;
1245         }
1246
1247         /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1248          * otherwise the vga fbdev driver falls over. */
1249         ret = i915_kick_out_firmware_fb(dev_priv);
1250         if (ret) {
1251                 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1252                 goto out_ggtt;
1253         }
1254
1255         ret = i915_kick_out_vgacon(dev_priv);
1256         if (ret) {
1257                 DRM_ERROR("failed to remove conflicting VGA console\n");
1258                 goto out_ggtt;
1259         }
1260
1261         pci_set_master(dev->pdev);
1262
1263         /* overlay on gen2 is broken and can't address above 1G */
1264         if (IS_GEN2(dev)) {
1265                 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1266                 if (ret) {
1267                         DRM_ERROR("failed to set DMA mask\n");
1268
1269                         goto out_ggtt;
1270                 }
1271         }
1272
1273
1274         /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1275          * using 32bit addressing, overwriting memory if HWS is located
1276          * above 4GB.
1277          *
1278          * The documentation also mentions an issue with undefined
1279          * behaviour if any general state is accessed within a page above 4GB,
1280          * which also needs to be handled carefully.
1281          */
1282         if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
1283                 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1284
1285                 if (ret) {
1286                         DRM_ERROR("failed to set DMA mask\n");
1287
1288                         goto out_ggtt;
1289                 }
1290         }
1291
1292         aperture_size = ggtt->mappable_end;
1293
1294         ggtt->mappable =
1295                 io_mapping_create_wc(ggtt->mappable_base,
1296                                      aperture_size);
1297         if (!ggtt->mappable) {
1298                 ret = -EIO;
1299                 goto out_ggtt;
1300         }
1301
1302         ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
1303                                               aperture_size);
1304
1305         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1306                            PM_QOS_DEFAULT_VALUE);
1307
1308         intel_uncore_sanitize(dev_priv);
1309
1310         intel_opregion_setup(dev);
1311
1312         i915_gem_load_init_fences(dev_priv);
1313
1314         /* On the 945G/GM, the chipset reports the MSI capability on the
1315          * integrated graphics even though the support isn't actually there
1316          * according to the published specs.  It doesn't appear to function
1317          * correctly in testing on 945G.
1318          * This may be a side effect of MSI having been made available for PEG
1319          * and the registers being closely associated.
1320          *
1321          * According to chipset errata, on the 965GM, MSI interrupts may
1322  * be lost or delayed, but we use them anyway to avoid
1323          * stuck interrupts on some machines.
1324          */
1325         if (!IS_I945G(dev) && !IS_I945GM(dev)) {
1326                 if (pci_enable_msi(dev->pdev) < 0)
1327                         DRM_DEBUG_DRIVER("can't enable MSI\n");
1328         }
1329
1330         return 0;
1331
1332 out_ggtt:
1333         i915_ggtt_cleanup_hw(dev);
1334
1335         return ret;
1336 }
1337
1338 /**
1339  * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
1340  * @dev_priv: device private
1341  */
1342 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
1343 {
1344         struct drm_device *dev = dev_priv->dev;
1345         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1346
1347         if (dev->pdev->msi_enabled)
1348                 pci_disable_msi(dev->pdev);
1349
1350         pm_qos_remove_request(&dev_priv->pm_qos);
1351         arch_phys_wc_del(ggtt->mtrr);
1352         io_mapping_free(ggtt->mappable);
1353         i915_ggtt_cleanup_hw(dev);
1354 }
1355
1356 /**
1357  * i915_driver_register - register the driver with the rest of the system
1358  * @dev_priv: device private
1359  *
1360  * Perform any steps necessary to make the driver available via kernel
1361  * internal or userspace interfaces.
1362  */
1363 static void i915_driver_register(struct drm_i915_private *dev_priv)
1364 {
1365         struct drm_device *dev = dev_priv->dev;
1366
1367         i915_gem_shrinker_init(dev_priv);
1368         /*
1369          * Notify the host that we have a valid display surface set up
1370          * after modesetting, when running inside a VM.
1371          */
1372         if (intel_vgpu_active(dev_priv))
1373                 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1374
1375         i915_setup_sysfs(dev);
1376
1377         if (INTEL_INFO(dev_priv)->num_pipes) {
1378                 /* Must be done after probing outputs */
1379                 intel_opregion_init(dev);
1380                 acpi_video_register();
1381         }
1382
1383         if (IS_GEN5(dev_priv))
1384                 intel_gpu_ips_init(dev_priv);
1385
1386         i915_audio_component_init(dev_priv);
1387 }
1388
1389 /**
1390  * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1391  * @dev_priv: device private
1392  */
1393 static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1394 {
1395         i915_audio_component_cleanup(dev_priv);
1396         intel_gpu_ips_teardown();
1397         acpi_video_unregister();
1398         intel_opregion_fini(dev_priv->dev);
1399         i915_teardown_sysfs(dev_priv->dev);
1400         i915_gem_shrinker_cleanup(dev_priv);
1401 }
1402
1403 /**
1404  * i915_driver_load - setup chip and create an initial config
1405  * @dev: DRM device
1406  * @flags: startup flags
1407  *
1408  * The driver load routine has to do several things:
1409  *   - drive output discovery via intel_modeset_init()
1410  *   - initialize the memory manager
1411  *   - allocate initial config memory
1412  *   - setup the DRM framebuffer with the allocated memory
1413  */
1414 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1415 {
1416         struct drm_i915_private *dev_priv;
1417         int ret = 0;
1418
1419         dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1420         if (dev_priv == NULL)
1421                 return -ENOMEM;
1422
1423         dev->dev_private = dev_priv;
1424         /* Must be set before calling __i915_printk */
1425         dev_priv->dev = dev;
1426
1427         ret = i915_driver_init_early(dev_priv, dev,
1428                                      (struct intel_device_info *)flags);
1429
1430         if (ret < 0)
1431                 goto out_free_priv;
1432
        intel_runtime_pm_get(dev_priv);

        ret = i915_driver_init_mmio(dev_priv);
        if (ret < 0)
                goto out_runtime_pm_put;

        ret = i915_driver_init_hw(dev_priv);
        if (ret < 0)
                goto out_cleanup_mmio;

        /*
         * TODO: move the vblank init and parts of modeset init steps into one
         * of the i915_driver_init_/i915_driver_register functions according
         * to the role/effect of the given init step.
         */
        if (INTEL_INFO(dev)->num_pipes) {
                ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
                if (ret)
                        goto out_cleanup_hw;
        }

        ret = i915_load_modeset_init(dev);
        if (ret < 0)
                goto out_cleanup_vblank;

        i915_driver_register(dev_priv);

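        /* Initialization is complete: enable runtime PM and drop the load-time reference. */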
        intel_runtime_pm_enable(dev_priv);

        intel_runtime_pm_put(dev_priv);

        return 0;

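        /* Error unwind: undo the init steps above in reverse order. */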
out_cleanup_vblank:
        drm_vblank_cleanup(dev);
out_cleanup_hw:
        i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
        i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
        intel_runtime_pm_put(dev_priv);
        i915_driver_cleanup_early(dev_priv);
out_free_priv:
        i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);

        kfree(dev_priv);

        return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        intel_fbdev_fini(dev);

        ret = i915_gem_suspend(dev);
        if (ret) {
                DRM_ERROR("failed to idle hardware: %d\n", ret);
                return ret;
        }

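        /*
         * Keep all power wells enabled while tearing the device down; the
         * reference is dropped again just before the final early cleanup.
         */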
        intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

        i915_driver_unregister(dev_priv);

        drm_vblank_cleanup(dev);

        intel_modeset_cleanup(dev);

        /*
         * Free the memory allocated for the child device config parsed
         * from the VBT.
         */
        if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
                kfree(dev_priv->vbt.child_dev);
                dev_priv->vbt.child_dev = NULL;
                dev_priv->vbt.child_dev_num = 0;
        }
        kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
        dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
        kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
        dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

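        /* Unregister from vga_switcheroo and drop our VGA arbiter callback. */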
        vga_switcheroo_unregister_client(dev->pdev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);

        intel_csr_ucode_fini(dev_priv);

        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        i915_destroy_error_state(dev);

        /* Flush any outstanding unpin_work. */
        flush_workqueue(dev_priv->wq);

        intel_guc_ucode_fini(dev);
        i915_gem_fini(dev);
        intel_fbc_cleanup_cfb(dev_priv);

        intel_power_domains_fini(dev_priv);

        i915_driver_cleanup_hw(dev_priv);
        i915_driver_cleanup_mmio(dev_priv);

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

        i915_driver_cleanup_early(dev_priv);
        kfree(dev_priv);

        return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
        int ret;

        ret = i915_gem_open(dev, file);
        if (ret)
                return ret;

        return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
        intel_fbdev_restore_mode(dev);
        vga_switcheroo_process_delayed_switch();
}

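/*
 * Per-file teardown: release this client's GEM contexts and remaining GEM
 * state under struct_mutex before the file goes away.
 */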
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
        mutex_lock(&dev->struct_mutex);
        i915_gem_context_close(dev, file);
        i915_gem_release(dev, file);
        mutex_unlock(&dev->struct_mutex);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        kfree(file_priv);
}

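/* Object pinning from userspace is no longer supported; reject the ioctl. */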
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        return -ENODEV;
}

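/*
 * Ioctl table. The obsolete DRI1 entries are all routed to drm_noop and are
 * kept only so the ioctl numbering stays stable.
 */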
const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);