drivers/gpu/drm/i915/intel_uncore.c
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
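
/*
 * Note: the __raw_* accessors above bypass the forcewake and FIFO
 * bookkeeping in this file; they are meant for uncore-internal use (such
 * as poking the forcewake registers themselves). Everything else should
 * go through the per-gen mmio vfuncs installed by intel_uncore_init().
 */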

static const char * const forcewake_domain_names[] = {
        "render",
        "blitter",
        "media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
        BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
                return forcewake_domain_names[id];

        WARN_ON(id);

        return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
        WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
                  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
        WARN_ON(d->reg_set == 0);
        __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
        mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
        if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
                             FORCEWAKE_KERNEL),
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
                          intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
        __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
        /* something from the same cacheline, but not from the set register */
        if (d->reg_post)
                __raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
                fw_domain_wait_ack_clear(d);
                fw_domain_get(d);
                fw_domain_wait_ack(d);
        }
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
                fw_domain_put(d);
                fw_domain_posting_read(d);
        }
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        /* No need to do this for all domains, the first one found is enough */
        for_each_fw_domain(d, dev_priv, id) {
                fw_domain_posting_read(d);
                break;
        }
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *d;
        enum forcewake_domain_id id;

        if (dev_priv->uncore.fw_domains == 0)
                return;

        for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
                fw_domain_reset(d);

        fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
        /* w/a for a sporadic read returning 0 by waiting for the GT
         * thread to wake up.
         */
        if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
                                GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
                DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
                                              enum forcewake_domains fw_domains)
{
        fw_domains_get(dev_priv, fw_domains);

        /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
        u32 gtfifodbg;

        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
        if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
                __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
                                     enum forcewake_domains fw_domains)
{
        fw_domains_put(dev_priv, fw_domains);
        gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
        u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

        return count & GT_FIFO_FREE_ENTRIES_MASK;
}

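/*
 * The GT wake FIFO can only absorb a limited number of writes while the
 * GT is powered down; we keep at least GT_FIFO_NUM_RESERVED_ENTRIES slots
 * free before posting a write, spinning (up to ~5ms in 10us steps) for
 * entries to drain if necessary.
 */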
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        /* On VLV, the FIFO is shared by both SW and HW, so we need to
         * read FREE_ENTRIES every time */
        if (IS_VALLEYVIEW(dev_priv->dev))
                dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
                u32 fifo = fifo_free_entries(dev_priv);

                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
                        fifo = fifo_free_entries(dev_priv);
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
                dev_priv->uncore.fifo_count = fifo;
        }
        dev_priv->uncore.fifo_count--;

        return ret;
}

static void intel_uncore_fw_release_timer(unsigned long arg)
{
        struct intel_uncore_forcewake_domain *domain = (void *)arg;
        unsigned long irqflags;

        assert_device_not_suspended(domain->i915);

        spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
        if (WARN_ON(domain->wake_count == 0))
                domain->wake_count++;

        if (--domain->wake_count == 0)
                domain->i915->uncore.funcs.force_wake_put(domain->i915,
                                                          1 << domain->id);

        spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
        enum forcewake_domain_id id;
        enum forcewake_domains fw = 0, active_domains;

        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly. Wait until all pending
         * timers are run before holding.
         */
        while (1) {
                active_domains = 0;

                for_each_fw_domain(domain, dev_priv, id) {
                        if (del_timer_sync(&domain->timer) == 0)
                                continue;

                        intel_uncore_fw_release_timer((unsigned long)domain);
                }

                spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

                for_each_fw_domain(domain, dev_priv, id) {
                        if (timer_pending(&domain->timer))
                                active_domains |= (1 << id);
                }

                if (active_domains == 0)
                        break;

                if (--retry_count == 0) {
                        DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
                        break;
                }

                spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
                cond_resched();
        }

        WARN_ON(active_domains);

        for_each_fw_domain(domain, dev_priv, id)
                if (domain->wake_count)
                        fw |= 1 << id;

        if (fw)
                dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

        fw_domains_reset(dev_priv, FORCEWAKE_ALL);

        if (restore) { /* If reset with a user forcewake, try to restore */
                if (fw)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

                if (IS_GEN6(dev) || IS_GEN7(dev))
                        dev_priv->uncore.fifo_count =
                                fifo_free_entries(dev_priv);
        }

        if (!restore)
                assert_forcewakes_inactive(dev_priv);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
             INTEL_INFO(dev)->gen >= 9) &&
            (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
                /* The docs do not explain exactly how the calculation can be
                 * made. It is somewhat guessable, but for now, it's always
                 * 128MB.
                 * NB: We can't write IDICR yet because we do not have gt funcs
                 * set up */
                dev_priv->ellc_size = 128;
                DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
                                          bool restore_forcewake)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev))
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev) || IS_GEN7(dev))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));

        intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
        __intel_uncore_early_sanitize(dev, restore_forcewake);
        i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down particular
 * forcewake domains this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
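 *
 * A minimal usage sketch (illustrative only, no real register sequence):
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... MMIO accesses the GT must stay awake for ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);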
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        WARN_ON(dev_priv->pm.suspended);

        fw_domains &= dev_priv->uncore.fw_domains;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (domain->wake_count++)
                        fw_domains &= ~(1 << id);
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
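 *
 * See intel_uncore_forcewake_get() for a usage sketch of the get/put
 * pairing.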
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                enum forcewake_domains fw_domains)
{
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (!dev_priv->uncore.funcs.force_wake_put)
                return;

        fw_domains &= dev_priv->uncore.fw_domains;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (WARN_ON(domain->wake_count == 0))
                        continue;

                if (--domain->wake_count)
                        continue;

                domain->wake_count++;
                fw_domain_arm_timer(domain);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (!dev_priv->uncore.funcs.force_wake_get)
                return;

        for_each_fw_domain(domain, dev_priv, id)
                WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
         ((reg) < 0x40000 && (reg) != FORCEWAKE)
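
/*
 * Illustrative example (offsets per i915_reg.h): a GT register such as
 * RING_TAIL(RENDER_RING_BASE) at 0x2030 lies below 0x40000 and so needs
 * forcewake, whereas display registers living above 0x40000 do not.
 */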

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x4000) || \
         REG_RANGE((reg), 0x5000, 0x8000) || \
         REG_RANGE((reg), 0xB000, 0x12000) || \
         REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x22000, 0x24000) || \
         REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x4000) || \
         REG_RANGE((reg), 0x5200, 0x8000) || \
         REG_RANGE((reg), 0x8300, 0x8500) || \
         REG_RANGE((reg), 0xB000, 0xB480) || \
         REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x8800, 0x8900) || \
         REG_RANGE((reg), 0xD000, 0xD800) || \
         REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x1A000, 0x1C000) || \
         REG_RANGE((reg), 0x1E800, 0x1EA00) || \
         REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x4000, 0x5000) || \
         REG_RANGE((reg), 0x8000, 0x8300) || \
         REG_RANGE((reg), 0x8500, 0x8600) || \
         REG_RANGE((reg), 0x9000, 0xB000) || \
         REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
        REG_RANGE((reg), 0xB00,  0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x2000, 0x2700) || \
         REG_RANGE((reg), 0x3000, 0x4000) || \
         REG_RANGE((reg), 0x5200, 0x8000) || \
         REG_RANGE((reg), 0x8140, 0x8160) || \
         REG_RANGE((reg), 0x8300, 0x8500) || \
         REG_RANGE((reg), 0x8C00, 0x8D00) || \
         REG_RANGE((reg), 0xB000, 0xB480) || \
         REG_RANGE((reg), 0xE000, 0xE900) || \
         REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
        (REG_RANGE((reg), 0x8130, 0x8140) || \
         REG_RANGE((reg), 0x8800, 0x8A00) || \
         REG_RANGE((reg), 0xD000, 0xD800) || \
         REG_RANGE((reg), 0x12000, 0x14000) || \
         REG_RANGE((reg), 0x1A000, 0x1EA00) || \
         REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
        REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
        ((reg) < 0x40000 && \
         !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
         !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
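
/*
 * A worked example of the classification above: on gen9 a register at
 * 0x12400 matches FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET() (0x12000-0x14000),
 * so the gen9 mmio handlers below grab FORCEWAKE_MEDIA for it, while
 * 0xB10 falls in the uncore range and needs no forcewake at all.
 */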

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
        /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
         * the chip from rc6 before touching it for real. MI_MODE is masked,
         * hence harmless to write 0 into. */
        __raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
                        bool before)
{
        const char *op = read ? "reading" : "writing to";
        const char *when = before ? "before" : "after";

        if (!i915.mmio_debug)
                return;

        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
                     when, op, reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
        if (i915.mmio_debug)
                return;

        if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unclaimed register detected. Please use i915.mmio_debug=1 to debug this problem.");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}

#define GEN2_READ_HEADER(x) \
        u##x val = 0; \
        assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN2_READ_HEADER(x); \
        ilk_dummy_write(dev_priv); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
        assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
                                    enum forcewake_domains fw_domains)
{
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;

        if (WARN_ON(!fw_domains))
                return;

        /* Ideally GCC would constant-fold and eliminate this loop */
        for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
                if (domain->wake_count) {
                        fw_domains &= ~(1 << id);
                        continue;
                }

                domain->wake_count++;
                fw_domain_arm_timer(domain);
        }

        if (fw_domains)
                dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
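
/*
 * Note that __force_wake_get() only ever takes references here; they are
 * dropped from intel_uncore_fw_release_timer() once the 1-jiffy timer
 * armed by fw_domain_arm_timer() fires, so back-to-back accesses reuse an
 * already-awake domain instead of bouncing forcewake on and off.
 */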

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        val = __raw_i915_read##x(dev_priv, reg); \
        hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
        GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
        else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
                __force_wake_get(dev_priv, \
                                 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
         ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
                fw_engine = 0; \
        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER; \
        else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_MEDIA; \
        else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
        else \
                fw_engine = FORCEWAKE_BLITTER; \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER


#define GEN2_WRITE_HEADER \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_device_not_suspended(dev_priv);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        GEN2_WRITE_HEADER; \
        ilk_dummy_write(dev_priv); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_device_not_suspended(dev_priv); \
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
        hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
                          off_t reg, u##x val, bool trace) { \
        GEN6_WRITE_HEADER; \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
        FORCEWAKE_MT,
        GEN6_RPNSWREQ,
        GEN6_RC_VIDEO_FREQ,
        RING_TAIL(RENDER_RING_BASE),
        RING_TAIL(GEN6_BSD_RING_BASE),
        RING_TAIL(VEBOX_RING_BASE),
        RING_TAIL(BLT_RING_BASE),
        /* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
                if (reg == gen8_shadowed_regs[i])
                        return true;

        return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        GEN6_WRITE_HEADER; \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        __raw_i915_write##x(dev_priv, reg, val); \
        hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
        hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        bool shadowed = is_gen8_shadowed(dev_priv, reg); \
        GEN6_WRITE_HEADER; \
        if (!shadowed) { \
                if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
                        __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
                else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
                        __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
                else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
                        __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
        RING_TAIL(RENDER_RING_BASE),
        RING_TAIL(GEN6_BSD_RING_BASE),
        RING_TAIL(VEBOX_RING_BASE),
        RING_TAIL(BLT_RING_BASE),
        FORCEWAKE_BLITTER_GEN9,
        FORCEWAKE_RENDER_GEN9,
        FORCEWAKE_MEDIA_GEN9,
        GEN6_RPNSWREQ,
        GEN6_RC_VIDEO_FREQ,
        /* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
                if (reg == gen9_shadowed_regs[i])
                        return true;

        return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
                bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
            is_gen9_shadowed(dev_priv, reg)) \
                fw_engine = 0; \
        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER; \
        else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_MEDIA; \
        else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
                fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
        else \
                fw_engine = FORCEWAKE_BLITTER; \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
        dev_priv->uncore.funcs.mmio_writew = x##_write16; \
        dev_priv->uncore.funcs.mmio_writel = x##_write32; \
        dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
        dev_priv->uncore.funcs.mmio_readb = x##_read8; \
        dev_priv->uncore.funcs.mmio_readw = x##_read16; \
        dev_priv->uncore.funcs.mmio_readl = x##_read32; \
        dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

static void fw_domain_init(struct drm_i915_private *dev_priv,
                           enum forcewake_domain_id domain_id,
                           u32 reg_set, u32 reg_ack)
{
        struct intel_uncore_forcewake_domain *d;

        if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
                return;

        d = &dev_priv->uncore.fw_domain[domain_id];

        WARN_ON(d->wake_count);

        d->wake_count = 0;
        d->reg_set = reg_set;
        d->reg_ack = reg_ack;

        if (IS_GEN6(dev_priv)) {
                d->val_reset = 0;
                d->val_set = FORCEWAKE_KERNEL;
                d->val_clear = 0;
        } else {
                /* WaRsClearFWBitsAtReset:bdw,skl */
                d->val_reset = _MASKED_BIT_DISABLE(0xffff);
                d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
                d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
        }

        if (IS_VALLEYVIEW(dev_priv))
                d->reg_post = FORCEWAKE_ACK_VLV;
        else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
                d->reg_post = ECOBUS;
        else
                d->reg_post = 0;

        d->i915 = dev_priv;
        d->id = domain_id;

        setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

        dev_priv->uncore.fw_domains |= (1 << domain_id);

        fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev_priv->dev)->gen <= 5)
                return;

        if (IS_GEN9(dev)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_RENDER_GEN9,
                               FORCEWAKE_ACK_RENDER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
                               FORCEWAKE_BLITTER_GEN9,
                               FORCEWAKE_ACK_BLITTER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                if (!IS_CHERRYVIEW(dev))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev)) {
                u32 ecobus;

                /* IVB configs may use multi-threaded forcewake */

                /* A small trick here - if the bios hasn't configured
                 * MT forcewake, and if the device is in RC6, then
                 * force_wake_mt_get will not wake the device and the
                 * ECOBUS read will return zero. Which will be
                 * (correctly) interpreted by the test below as MT
                 * forcewake being disabled.
                 */
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;

                /* We need to init first for ECOBUS access and then
                 * determine later if we want to reinit, in case MT access is
                 * not working
                 */
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);

                mutex_lock(&dev->struct_mutex);
                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);

                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
                        DRM_INFO("when using vblank-synced partial screen updates.\n");
                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE, FORCEWAKE_ACK);
                }
        } else if (IS_GEN6(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
                        fw_domains_put_with_fifo;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE, FORCEWAKE_ACK);
        }

        /* All future platforms are expected to require complex power gating */
        WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        i915_check_vgpu(dev);

        intel_uncore_ellc_detect(dev);
        intel_uncore_fw_domains_init(dev);
        __intel_uncore_early_sanitize(dev, false);

        switch (INTEL_INFO(dev)->gen) {
        default:
                MISSING_CASE(INTEL_INFO(dev)->gen);
                return;
        case 9:
                ASSIGN_WRITE_MMIO_VFUNCS(gen9);
                ASSIGN_READ_MMIO_VFUNCS(gen9);
                break;
        case 8:
                if (IS_CHERRYVIEW(dev)) {
                        ASSIGN_WRITE_MMIO_VFUNCS(chv);
                        ASSIGN_READ_MMIO_VFUNCS(chv);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen8);
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 7:
        case 6:
                if (IS_HASWELL(dev)) {
                        ASSIGN_WRITE_MMIO_VFUNCS(hsw);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen6);
                }

                if (IS_VALLEYVIEW(dev)) {
                        ASSIGN_READ_MMIO_VFUNCS(vlv);
                } else {
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
                }
                break;
        case 5:
                ASSIGN_WRITE_MMIO_VFUNCS(gen5);
                ASSIGN_READ_MMIO_VFUNCS(gen5);
                break;
        case 4:
        case 3:
        case 2:
                ASSIGN_WRITE_MMIO_VFUNCS(gen2);
                ASSIGN_READ_MMIO_VFUNCS(gen2);
                break;
        }

        if (intel_vgpu_active(dev)) {
                ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
                ASSIGN_READ_MMIO_VFUNCS(vgpu);
        }

        i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
        /* Paranoia: make sure we have disabled everything before we exit. */
        intel_uncore_sanitize(dev);
        intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
        uint64_t offset;
        uint32_t size;
        /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
        uint32_t gen_bitmask;
} whitelist[] = {
        { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
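
/*
 * A hedged userspace sketch of driving this ioctl (field names from
 * include/uapi/drm/i915_drm.h; the offset below assumes
 * RING_TIMESTAMP(RENDER_RING_BASE) == 0x2358, the only whitelisted
 * register here):
 *
 *	struct drm_i915_reg_read rr = { .offset = 0x2358 };
 *	ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &rr);
 *	// on success rr.val holds the 64-bit render ring timestamp
 */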

int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        int i, ret = 0;

        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (entry->offset == reg->offset &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }

        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;

        intel_runtime_pm_get(dev_priv);

        switch (entry->size) {
        case 8:
                reg->val = I915_READ64(reg->offset);
                break;
        case 4:
                reg->val = I915_READ(reg->offset);
                break;
        case 2:
                reg->val = I915_READ16(reg->offset);
                break;
        case 1:
                reg->val = I915_READ8(reg->offset);
                break;
        default:
                MISSING_CASE(entry->size);
                ret = -EINVAL;
                goto out;
        }

out:
        intel_runtime_pm_put(dev_priv);
        return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reset_stats *args = data;
        struct i915_ctx_hang_stats *hs;
        struct intel_context *ctx;
        int ret;

        if (args->flags || args->pad)
                return -EINVAL;

        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
        }
        hs = &ctx->hang_stats;

        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
        else
                args->reset_count = 0;

        args->batch_active = hs->batch_active;
        args->batch_pending = hs->batch_pending;

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
        u8 gdrst;

        pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
        /* assert reset for at least 20 usec */
        pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
        udelay(20);
        pci_write_config_byte(dev->pdev, I915_GDRST, 0);

        return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
        u8 gdrst;

        pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
        pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
        return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        pci_write_config_byte(dev->pdev, I915_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret = wait_for(g4x_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);

        pci_write_config_byte(dev->pdev, I915_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        ret = wait_for(g4x_reset_complete(dev), 500);
        if (ret)
                return ret;

        /* WaVcpClkGateDisableForMediaReset:ctg,elk */
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);

        pci_write_config_byte(dev->pdev, I915_GDRST, 0);

        return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
                   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
        ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
                        ILK_GRDOM_RESET_ENABLE) == 0, 500);
        if (ret)
                return ret;

        I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

        return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /* Reset the chip */

        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

        /* Spin waiting for the device to ack the reset request */
        ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

        intel_uncore_forcewake_reset(dev, true);

        return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen >= 6)
                return gen6_do_reset(dev);
        else if (IS_GEN5(dev))
                return ironlake_do_reset(dev);
        else if (IS_G4X(dev))
                return g4x_do_reset(dev);
        else if (IS_G33(dev))
                return g33_do_reset(dev);
        else if (INTEL_INFO(dev)->gen >= 3)
                return i915_do_reset(dev);
        else
                return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
            (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
                DRM_ERROR("Unclaimed register before interrupt\n");
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
}