1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <drm/drmP.h>
34 #include <drm/i915_drm.h>
35 #include "i915_drv.h"
36 #include "i915_trace.h"
37 #include "intel_drv.h"
38
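/* Hotplug pin -> interrupt bit lookup tables, indexed by enum hpd_pin.
 * hpd_ibx and hpd_cpt map pins to the south display engine (PCH) trigger bits,
 * while hpd_mask_i915 and the hpd_status_* tables cover the enable and status
 * bits of the CPU-side PORT_HOTPLUG registers.
 */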
39 static const u32 hpd_ibx[] = {
40         [HPD_CRT] = SDE_CRT_HOTPLUG,
41         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
42         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
43         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
44         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
45 };
46
47 static const u32 hpd_cpt[] = {
48         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
49         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
50         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
51         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
52         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
53 };
54
55 static const u32 hpd_mask_i915[] = {
56         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
57         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
58         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
59         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
60         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
61         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
62 };
63
64 static const u32 hpd_status_gen4[] = {
65         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
66         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
67         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
68         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
69         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
70         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71 };
72
73 static const u32 hpd_status_i965[] = {
74         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
76         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
77         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80 };
81
82 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
83         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
85         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
86         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
87         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
88         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89 };
90
91 static void ibx_hpd_irq_setup(struct drm_device *dev);
92 static void i915_hpd_irq_setup(struct drm_device *dev);
93
94 /* For display hotplug interrupt */
95 static void
96 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
97 {
98         if ((dev_priv->irq_mask & mask) != 0) {
99                 dev_priv->irq_mask &= ~mask;
100                 I915_WRITE(DEIMR, dev_priv->irq_mask);
101                 POSTING_READ(DEIMR);
102         }
103 }
104
105 static void
106 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
107 {
108         if ((dev_priv->irq_mask & mask) != mask) {
109                 dev_priv->irq_mask |= mask;
110                 I915_WRITE(DEIMR, dev_priv->irq_mask);
111                 POSTING_READ(DEIMR);
112         }
113 }
114
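/* PIPESTAT keeps the interrupt enable bits in its high word and the matching
 * sticky status bits in the low word, so writing "mask | (mask >> 16)" below
 * both enables the interrupt and clears any status already pending for it.
 */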
115 void
116 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
117 {
118         u32 reg = PIPESTAT(pipe);
119         u32 pipestat = I915_READ(reg) & 0x7fff0000;
120
121         if ((pipestat & mask) == mask)
122                 return;
123
124         /* Enable the interrupt, clear any pending status */
125         pipestat |= mask | (mask >> 16);
126         I915_WRITE(reg, pipestat);
127         POSTING_READ(reg);
128 }
129
130 void
131 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
132 {
133         u32 reg = PIPESTAT(pipe);
134         u32 pipestat = I915_READ(reg) & 0x7fff0000;
135
136         if ((pipestat & mask) == 0)
137                 return;
138
139         pipestat &= ~mask;
140         I915_WRITE(reg, pipestat);
141         POSTING_READ(reg);
142 }
143
144 /**
145  * intel_enable_asle - enable ASLE interrupt for OpRegion
146  */
147 void intel_enable_asle(struct drm_device *dev)
148 {
149         drm_i915_private_t *dev_priv = dev->dev_private;
150         unsigned long irqflags;
151
152         /* FIXME: opregion/asle for VLV */
153         if (IS_VALLEYVIEW(dev))
154                 return;
155
156         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
157
158         if (HAS_PCH_SPLIT(dev))
159                 ironlake_enable_display_irq(dev_priv, DE_GSE);
160         else {
161                 i915_enable_pipestat(dev_priv, 1,
162                                      PIPE_LEGACY_BLC_EVENT_ENABLE);
163                 if (INTEL_INFO(dev)->gen >= 4)
164                         i915_enable_pipestat(dev_priv, 0,
165                                              PIPE_LEGACY_BLC_EVENT_ENABLE);
166         }
167
168         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
169 }
170
171 /**
172  * i915_pipe_enabled - check if a pipe is enabled
173  * @dev: DRM device
174  * @pipe: pipe to check
175  *
176  * Reading certain registers when the pipe is disabled can hang the chip.
177  * Use this routine to make sure the PLL is running and the pipe is active
178  * before reading such registers if unsure.
179  */
180 static int
181 i915_pipe_enabled(struct drm_device *dev, int pipe)
182 {
183         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
184         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
185                                                                       pipe);
186
187         return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
188 }
189
190 /* Called from drm generic code, passed a 'crtc', which
191  * we use as a pipe index
192  */
193 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
194 {
195         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
196         unsigned long high_frame;
197         unsigned long low_frame;
198         u32 high1, high2, low;
199
200         if (!i915_pipe_enabled(dev, pipe)) {
201                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
202                                 "pipe %c\n", pipe_name(pipe));
203                 return 0;
204         }
205
206         high_frame = PIPEFRAME(pipe);
207         low_frame = PIPEFRAMEPIXEL(pipe);
208
209         /*
210          * High & low register fields aren't synchronized, so make sure
211          * we get a low value that's stable across two reads of the high
212          * register.
213          */
214         do {
215                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
216                 low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
217                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
218         } while (high1 != high2);
219
220         high1 >>= PIPE_FRAME_HIGH_SHIFT;
221         low >>= PIPE_FRAME_LOW_SHIFT;
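        /* The hardware frame counter is split across two registers: the low
         * byte lives in PIPEFRAMEPIXEL next to the pixel counter, the rest in
         * PIPEFRAME, so stitch the two halves back together here.
         */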
222         return (high1 << 8) | low;
223 }
224
225 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
226 {
227         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
228         int reg = PIPE_FRMCOUNT_GM45(pipe);
229
230         if (!i915_pipe_enabled(dev, pipe)) {
231                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
232                                  "pipe %c\n", pipe_name(pipe));
233                 return 0;
234         }
235
236         return I915_READ(reg);
237 }
238
239 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
240                              int *vpos, int *hpos)
241 {
242         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
243         u32 vbl = 0, position = 0;
244         int vbl_start, vbl_end, htotal, vtotal;
245         bool in_vbl = true;
246         int ret = 0;
247         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
248                                                                       pipe);
249
250         if (!i915_pipe_enabled(dev, pipe)) {
251                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
252                                  "pipe %c\n", pipe_name(pipe));
253                 return 0;
254         }
255
256         /* Get vtotal. */
257         vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
258
259         if (INTEL_INFO(dev)->gen >= 4) {
260                 /* No obvious pixelcount register. Only query vertical
261                  * scanout position from Display scan line register.
262                  */
263                 position = I915_READ(PIPEDSL(pipe));
264
265                 /* Decode into vertical scanout position. Don't have
266                  * horizontal scanout position.
267                  */
268                 *vpos = position & 0x1fff;
269                 *hpos = 0;
270         } else {
271                 /* Have access to pixelcount since start of frame.
272                  * We can split this into vertical and horizontal
273                  * scanout position.
274                  */
275                 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
276
277                 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
278                 *vpos = position / htotal;
279                 *hpos = position - (*vpos * htotal);
280         }
281
282         /* Query vblank area. */
283         vbl = I915_READ(VBLANK(cpu_transcoder));
284
285         /* Test position against vblank region. */
286         vbl_start = vbl & 0x1fff;
287         vbl_end = (vbl >> 16) & 0x1fff;
288
289         if ((*vpos < vbl_start) || (*vpos > vbl_end))
290                 in_vbl = false;
291
292         /* Inside "upper part" of vblank area? Apply corrective offset: */
293         if (in_vbl && (*vpos >= vbl_start))
294                 *vpos = *vpos - vtotal;
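        /* The resulting negative position tells the DRM timestamping helper
         * (roughly) how many lines remain until the next active frame starts.
         */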
295
296         /* Readouts valid? */
297         if (vbl > 0)
298                 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
299
300         /* In vblank? */
301         if (in_vbl)
302                 ret |= DRM_SCANOUTPOS_INVBL;
303
304         return ret;
305 }
306
307 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
308                               int *max_error,
309                               struct timeval *vblank_time,
310                               unsigned flags)
311 {
312         struct drm_crtc *crtc;
313
314         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
315                 DRM_ERROR("Invalid crtc %d\n", pipe);
316                 return -EINVAL;
317         }
318
319         /* Get drm_crtc to timestamp: */
320         crtc = intel_get_crtc_for_pipe(dev, pipe);
321         if (crtc == NULL) {
322                 DRM_ERROR("Invalid crtc %d\n", pipe);
323                 return -EINVAL;
324         }
325
326         if (!crtc->enabled) {
327                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
328                 return -EBUSY;
329         }
330
331         /* Helper routine in DRM core does all the work: */
332         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
333                                                      vblank_time, flags,
334                                                      crtc);
335 }
336
337 /*
338  * Handle hotplug events outside the interrupt handler proper.
339  */
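/* Hotplug interrupts that storm detection has disabled are re-enabled again
 * after this many milliseconds.
 */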
340 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
341
342 static void i915_hotplug_work_func(struct work_struct *work)
343 {
344         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
345                                                     hotplug_work);
346         struct drm_device *dev = dev_priv->dev;
347         struct drm_mode_config *mode_config = &dev->mode_config;
348         struct intel_connector *intel_connector;
349         struct intel_encoder *intel_encoder;
350         struct drm_connector *connector;
351         unsigned long irqflags;
352         bool hpd_disabled = false;
353
354         /* HPD irq before everything is fully set up. */
355         if (!dev_priv->enable_hotplug_processing)
356                 return;
357
358         mutex_lock(&mode_config->mutex);
359         DRM_DEBUG_KMS("running encoder hotplug functions\n");
360
361         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
362         list_for_each_entry(connector, &mode_config->connector_list, head) {
363                 intel_connector = to_intel_connector(connector);
364                 intel_encoder = intel_connector->encoder;
365                 if (intel_encoder->hpd_pin > HPD_NONE &&
366                     dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
367                     connector->polled == DRM_CONNECTOR_POLL_HPD) {
368                         DRM_INFO("HPD interrupt storm detected on connector %s: "
369                                  "switching from hotplug detection to polling\n",
370                                 drm_get_connector_name(connector));
371                         dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
372                         connector->polled = DRM_CONNECTOR_POLL_CONNECT
373                                 | DRM_CONNECTOR_POLL_DISCONNECT;
374                         hpd_disabled = true;
375                 }
376         }
377          /* Polling may have been disabled earlier because there were no
378           * outputs to poll, so make sure it is re-enabled now that HPD is
379           * being turned off on some connectors and they need polling. */
380         if (hpd_disabled) {
381                 drm_kms_helper_poll_enable(dev);
382                 mod_timer(&dev_priv->hotplug_reenable_timer,
383                           jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
384         }
385
386         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
387
388         list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
389                 if (intel_encoder->hot_plug)
390                         intel_encoder->hot_plug(intel_encoder);
391
392         mutex_unlock(&mode_config->mutex);
393
394         /* Just fire off a uevent and let userspace tell us what to do */
395         drm_helper_hpd_irq_event(dev);
396 }
397
398 static void ironlake_handle_rps_change(struct drm_device *dev)
399 {
400         drm_i915_private_t *dev_priv = dev->dev_private;
401         u32 busy_up, busy_down, max_avg, min_avg;
402         u8 new_delay;
403         unsigned long flags;
404
405         spin_lock_irqsave(&mchdev_lock, flags);
406
407         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
408
409         new_delay = dev_priv->ips.cur_delay;
410
411         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
412         busy_up = I915_READ(RCPREVBSYTUPAVG);
413         busy_down = I915_READ(RCPREVBSYTDNAVG);
414         max_avg = I915_READ(RCBMAXAVG);
415         min_avg = I915_READ(RCBMINAVG);
416
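        /* Note: on ILK a smaller delay value means a higher frequency, so
         * ips.max_delay is the numerically smallest delay we may request and
         * ips.min_delay the numerically largest.
         */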
417         /* Handle RCS change request from hw */
418         if (busy_up > max_avg) {
419                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
420                         new_delay = dev_priv->ips.cur_delay - 1;
421                 if (new_delay < dev_priv->ips.max_delay)
422                         new_delay = dev_priv->ips.max_delay;
423         } else if (busy_down < min_avg) {
424                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
425                         new_delay = dev_priv->ips.cur_delay + 1;
426                 if (new_delay > dev_priv->ips.min_delay)
427                         new_delay = dev_priv->ips.min_delay;
428         }
429
430         if (ironlake_set_drps(dev, new_delay))
431                 dev_priv->ips.cur_delay = new_delay;
432
433         spin_unlock_irqrestore(&mchdev_lock, flags);
434
435         return;
436 }
437
438 static void notify_ring(struct drm_device *dev,
439                         struct intel_ring_buffer *ring)
440 {
441         struct drm_i915_private *dev_priv = dev->dev_private;
442
443         if (ring->obj == NULL)
444                 return;
445
446         trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
447
448         wake_up_all(&ring->irq_queue);
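        /* A completed request means the GPU is making progress, so reset the
         * hangcheck state and push its timer out again.
         */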
449         if (i915_enable_hangcheck) {
450                 dev_priv->gpu_error.hangcheck_count = 0;
451                 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
452                           round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
453         }
454 }
455
456 static void gen6_pm_rps_work(struct work_struct *work)
457 {
458         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
459                                                     rps.work);
460         u32 pm_iir, pm_imr;
461         u8 new_delay;
462
463         spin_lock_irq(&dev_priv->rps.lock);
464         pm_iir = dev_priv->rps.pm_iir;
465         dev_priv->rps.pm_iir = 0;
466         pm_imr = I915_READ(GEN6_PMIMR);
467         I915_WRITE(GEN6_PMIMR, 0);
468         spin_unlock_irq(&dev_priv->rps.lock);
469
470         if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
471                 return;
472
473         mutex_lock(&dev_priv->rps.hw_lock);
474
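        /* Step the requested frequency one bin in whichever direction the
         * hardware's up/down threshold interrupt asked for.
         */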
475         if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
476                 new_delay = dev_priv->rps.cur_delay + 1;
477         else
478                 new_delay = dev_priv->rps.cur_delay - 1;
479
480         /* sysfs frequency interfaces may have snuck in while servicing the
481          * interrupt
482          */
483         if (!(new_delay > dev_priv->rps.max_delay ||
484               new_delay < dev_priv->rps.min_delay)) {
485                 gen6_set_rps(dev_priv->dev, new_delay);
486         }
487
488         mutex_unlock(&dev_priv->rps.hw_lock);
489 }
490
491
492 /**
493  * ivybridge_parity_work - Workqueue called when a parity error interrupt
494  * occurred.
495  * @work: workqueue struct
496  *
497  * Doesn't actually do anything except notify userspace. As a consequence of
498  * this event, userspace should try to remap the bad rows, since statistically
499  * the same row is more likely to go bad again.
500  */
501 static void ivybridge_parity_work(struct work_struct *work)
502 {
503         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
504                                                     l3_parity.error_work);
505         u32 error_status, row, bank, subbank;
506         char *parity_event[5];
507         uint32_t misccpctl;
508         unsigned long flags;
509
510         /* We must turn off DOP level clock gating to access the L3 registers.
511          * In order to prevent a get/put style interface, acquire struct mutex
512          * any time we access those registers.
513          */
514         mutex_lock(&dev_priv->dev->struct_mutex);
515
516         misccpctl = I915_READ(GEN7_MISCCPCTL);
517         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
518         POSTING_READ(GEN7_MISCCPCTL);
519
520         error_status = I915_READ(GEN7_L3CDERRST1);
521         row = GEN7_PARITY_ERROR_ROW(error_status);
522         bank = GEN7_PARITY_ERROR_BANK(error_status);
523         subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
524
525         I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
526                                     GEN7_L3CDERRST1_ENABLE);
527         POSTING_READ(GEN7_L3CDERRST1);
528
529         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
530
531         spin_lock_irqsave(&dev_priv->irq_lock, flags);
532         dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
533         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
534         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
535
536         mutex_unlock(&dev_priv->dev->struct_mutex);
537
538         parity_event[0] = "L3_PARITY_ERROR=1";
539         parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
540         parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
541         parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
542         parity_event[4] = NULL;
543
544         kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
545                            KOBJ_CHANGE, parity_event);
546
547         DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
548                   row, bank, subbank);
549
550         kfree(parity_event[3]);
551         kfree(parity_event[2]);
552         kfree(parity_event[1]);
553 }
554
555 static void ivybridge_handle_parity_error(struct drm_device *dev)
556 {
557         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
558         unsigned long flags;
559
560         if (!HAS_L3_GPU_CACHE(dev))
561                 return;
562
563         spin_lock_irqsave(&dev_priv->irq_lock, flags);
564         dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
565         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
566         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
567
568         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
569 }
570
571 static void snb_gt_irq_handler(struct drm_device *dev,
572                                struct drm_i915_private *dev_priv,
573                                u32 gt_iir)
574 {
575
576         if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
577                       GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
578                 notify_ring(dev, &dev_priv->ring[RCS]);
579         if (gt_iir & GEN6_BSD_USER_INTERRUPT)
580                 notify_ring(dev, &dev_priv->ring[VCS]);
581         if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
582                 notify_ring(dev, &dev_priv->ring[BCS]);
583
584         if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
585                       GT_GEN6_BSD_CS_ERROR_INTERRUPT |
586                       GT_RENDER_CS_ERROR_INTERRUPT)) {
587                 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
588                 i915_handle_error(dev, false);
589         }
590
591         if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
592                 ivybridge_handle_parity_error(dev);
593 }
594
595 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
596                                 u32 pm_iir)
597 {
598         unsigned long flags;
599
600         /*
601          * IIR bits should never already be set because IMR should
602          * prevent an interrupt from being shown in IIR. The warning
603          * catches a case where we've unsafely cleared
604          * dev_priv->rps.pm_iir. Although missing an interrupt of the same
605          * type is not a problem, it indicates a bug in the logic.
606          *
607          * The mask bit in IMR is cleared by dev_priv->rps.work.
608          */
609
610         spin_lock_irqsave(&dev_priv->rps.lock, flags);
611         dev_priv->rps.pm_iir |= pm_iir;
612         I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
613         POSTING_READ(GEN6_PMIMR);
614         spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
615
616         queue_work(dev_priv->wq, &dev_priv->rps.work);
617 }
618
619 #define HPD_STORM_DETECT_PERIOD 1000
620 #define HPD_STORM_THRESHOLD 5
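/* More than HPD_STORM_THRESHOLD interrupts on a single pin within
 * HPD_STORM_DETECT_PERIOD milliseconds is treated as an interrupt storm, and
 * that pin is switched from hotplug detection to polling.
 */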
621
622 static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
623                                             u32 hotplug_trigger,
624                                             const u32 *hpd)
625 {
626         drm_i915_private_t *dev_priv = dev->dev_private;
627         unsigned long irqflags;
628         int i;
629         bool ret = false;
630
631         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
632
633         for (i = 1; i < HPD_NUM_PINS; i++) {
634
635                 if (!(hpd[i] & hotplug_trigger) ||
636                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
637                         continue;
638
639                 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
640                                    dev_priv->hpd_stats[i].hpd_last_jiffies
641                                    + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
642                         dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
643                         dev_priv->hpd_stats[i].hpd_cnt = 0;
644                 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
645                         dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
646                         DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
647                         ret = true;
648                 } else {
649                         dev_priv->hpd_stats[i].hpd_cnt++;
650                 }
651         }
652
653         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
654
655         return ret;
656 }
657
658 static void gmbus_irq_handler(struct drm_device *dev)
659 {
660         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
661
662         wake_up_all(&dev_priv->gmbus_wait_queue);
663 }
664
665 static void dp_aux_irq_handler(struct drm_device *dev)
666 {
667         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
668
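        /* DP AUX transfers wait on the same queue as GMBUS transfers, hence
         * the shared wake-up here.
         */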
669         wake_up_all(&dev_priv->gmbus_wait_queue);
670 }
671
672 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
673 {
674         struct drm_device *dev = (struct drm_device *) arg;
675         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
676         u32 iir, gt_iir, pm_iir;
677         irqreturn_t ret = IRQ_NONE;
678         unsigned long irqflags;
679         int pipe;
680         u32 pipe_stats[I915_MAX_PIPES];
681
682         atomic_inc(&dev_priv->irq_received);
683
684         while (true) {
685                 iir = I915_READ(VLV_IIR);
686                 gt_iir = I915_READ(GTIIR);
687                 pm_iir = I915_READ(GEN6_PMIIR);
688
689                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
690                         goto out;
691
692                 ret = IRQ_HANDLED;
693
694                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
695
696                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
697                 for_each_pipe(pipe) {
698                         int reg = PIPESTAT(pipe);
699                         pipe_stats[pipe] = I915_READ(reg);
700
701                         /*
702                          * Clear the PIPE*STAT regs before the IIR
703                          */
704                         if (pipe_stats[pipe] & 0x8000ffff) {
705                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
706                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
707                                                          pipe_name(pipe));
708                                 I915_WRITE(reg, pipe_stats[pipe]);
709                         }
710                 }
711                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
712
713                 for_each_pipe(pipe) {
714                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
715                                 drm_handle_vblank(dev, pipe);
716
717                         if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
718                                 intel_prepare_page_flip(dev, pipe);
719                                 intel_finish_page_flip(dev, pipe);
720                         }
721                 }
722
723                 /* Consume port.  Then clear IIR or we'll miss events */
724                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
725                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
726                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
727
728                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
729                                          hotplug_status);
730                         if (hotplug_trigger) {
731                                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
732                                         i915_hpd_irq_setup(dev);
733                                 queue_work(dev_priv->wq,
734                                            &dev_priv->hotplug_work);
735                         }
736                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
737                         I915_READ(PORT_HOTPLUG_STAT);
738                 }
739
740                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
741                         gmbus_irq_handler(dev);
742
743                 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
744                         gen6_queue_rps_work(dev_priv, pm_iir);
745
746                 I915_WRITE(GTIIR, gt_iir);
747                 I915_WRITE(GEN6_PMIIR, pm_iir);
748                 I915_WRITE(VLV_IIR, iir);
749         }
750
751 out:
752         return ret;
753 }
754
755 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
756 {
757         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
758         int pipe;
759         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
760
761         if (hotplug_trigger) {
762                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
763                         ibx_hpd_irq_setup(dev);
764                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
765         }
766         if (pch_iir & SDE_AUDIO_POWER_MASK)
767                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
768                                  (pch_iir & SDE_AUDIO_POWER_MASK) >>
769                                  SDE_AUDIO_POWER_SHIFT);
770
771         if (pch_iir & SDE_AUX_MASK)
772                 dp_aux_irq_handler(dev);
773
774         if (pch_iir & SDE_GMBUS)
775                 gmbus_irq_handler(dev);
776
777         if (pch_iir & SDE_AUDIO_HDCP_MASK)
778                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
779
780         if (pch_iir & SDE_AUDIO_TRANS_MASK)
781                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
782
783         if (pch_iir & SDE_POISON)
784                 DRM_ERROR("PCH poison interrupt\n");
785
786         if (pch_iir & SDE_FDI_MASK)
787                 for_each_pipe(pipe)
788                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
789                                          pipe_name(pipe),
790                                          I915_READ(FDI_RX_IIR(pipe)));
791
792         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
793                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
794
795         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
796                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
797
798         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
799                 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
800         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
801                 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
802 }
803
804 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
805 {
806         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
807         int pipe;
808         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
809
810         if (hotplug_trigger) {
811                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
812                         ibx_hpd_irq_setup(dev);
813                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
814         }
815         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
816                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
817                                  (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
818                                  SDE_AUDIO_POWER_SHIFT_CPT);
819
820         if (pch_iir & SDE_AUX_MASK_CPT)
821                 dp_aux_irq_handler(dev);
822
823         if (pch_iir & SDE_GMBUS_CPT)
824                 gmbus_irq_handler(dev);
825
826         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
827                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
828
829         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
830                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
831
832         if (pch_iir & SDE_FDI_MASK_CPT)
833                 for_each_pipe(pipe)
834                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
835                                          pipe_name(pipe),
836                                          I915_READ(FDI_RX_IIR(pipe)));
837 }
838
839 static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
840 {
841         struct drm_device *dev = (struct drm_device *) arg;
842         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
843         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
844         irqreturn_t ret = IRQ_NONE;
845         int i;
846
847         atomic_inc(&dev_priv->irq_received);
848
849         /* disable master interrupt before clearing iir  */
850         de_ier = I915_READ(DEIER);
851         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
852
853         /* Disable south interrupts. We'll only write to SDEIIR once, so further
854          * interrupts will be stored on its back queue, and then we'll be
855          * able to process them after we restore SDEIER (as soon as we restore
856          * it, we'll get an interrupt if SDEIIR still has something to process
857          * due to its back queue). */
858         if (!HAS_PCH_NOP(dev)) {
859                 sde_ier = I915_READ(SDEIER);
860                 I915_WRITE(SDEIER, 0);
861                 POSTING_READ(SDEIER);
862         }
863
864         gt_iir = I915_READ(GTIIR);
865         if (gt_iir) {
866                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
867                 I915_WRITE(GTIIR, gt_iir);
868                 ret = IRQ_HANDLED;
869         }
870
871         de_iir = I915_READ(DEIIR);
872         if (de_iir) {
873                 if (de_iir & DE_AUX_CHANNEL_A_IVB)
874                         dp_aux_irq_handler(dev);
875
876                 if (de_iir & DE_GSE_IVB)
877                         intel_opregion_gse_intr(dev);
878
879                 for (i = 0; i < 3; i++) {
880                         if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
881                                 drm_handle_vblank(dev, i);
882                         if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
883                                 intel_prepare_page_flip(dev, i);
884                                 intel_finish_page_flip_plane(dev, i);
885                         }
886                 }
887
888                 /* check event from PCH */
889                 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
890                         u32 pch_iir = I915_READ(SDEIIR);
891
892                         cpt_irq_handler(dev, pch_iir);
893
894                         /* clear PCH hotplug event before clear CPU irq */
895                         I915_WRITE(SDEIIR, pch_iir);
896                 }
897
898                 I915_WRITE(DEIIR, de_iir);
899                 ret = IRQ_HANDLED;
900         }
901
902         pm_iir = I915_READ(GEN6_PMIIR);
903         if (pm_iir) {
904                 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
905                         gen6_queue_rps_work(dev_priv, pm_iir);
906                 I915_WRITE(GEN6_PMIIR, pm_iir);
907                 ret = IRQ_HANDLED;
908         }
909
910         I915_WRITE(DEIER, de_ier);
911         POSTING_READ(DEIER);
912         if (!HAS_PCH_NOP(dev)) {
913                 I915_WRITE(SDEIER, sde_ier);
914                 POSTING_READ(SDEIER);
915         }
916
917         return ret;
918 }
919
920 static void ilk_gt_irq_handler(struct drm_device *dev,
921                                struct drm_i915_private *dev_priv,
922                                u32 gt_iir)
923 {
924         if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
925                 notify_ring(dev, &dev_priv->ring[RCS]);
926         if (gt_iir & GT_BSD_USER_INTERRUPT)
927                 notify_ring(dev, &dev_priv->ring[VCS]);
928 }
929
930 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
931 {
932         struct drm_device *dev = (struct drm_device *) arg;
933         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
934         int ret = IRQ_NONE;
935         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
936
937         atomic_inc(&dev_priv->irq_received);
938
939         /* disable master interrupt before clearing iir  */
940         de_ier = I915_READ(DEIER);
941         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
942         POSTING_READ(DEIER);
943
944         /* Disable south interrupts. We'll only write to SDEIIR once, so further
945          * interrupts will be stored on its back queue, and then we'll be
946          * able to process them after we restore SDEIER (as soon as we restore
947          * it, we'll get an interrupt if SDEIIR still has something to process
948          * due to its back queue). */
949         sde_ier = I915_READ(SDEIER);
950         I915_WRITE(SDEIER, 0);
951         POSTING_READ(SDEIER);
952
953         de_iir = I915_READ(DEIIR);
954         gt_iir = I915_READ(GTIIR);
955         pm_iir = I915_READ(GEN6_PMIIR);
956
957         if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
958                 goto done;
959
960         ret = IRQ_HANDLED;
961
962         if (IS_GEN5(dev))
963                 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
964         else
965                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
966
967         if (de_iir & DE_AUX_CHANNEL_A)
968                 dp_aux_irq_handler(dev);
969
970         if (de_iir & DE_GSE)
971                 intel_opregion_gse_intr(dev);
972
973         if (de_iir & DE_PIPEA_VBLANK)
974                 drm_handle_vblank(dev, 0);
975
976         if (de_iir & DE_PIPEB_VBLANK)
977                 drm_handle_vblank(dev, 1);
978
979         if (de_iir & DE_PLANEA_FLIP_DONE) {
980                 intel_prepare_page_flip(dev, 0);
981                 intel_finish_page_flip_plane(dev, 0);
982         }
983
984         if (de_iir & DE_PLANEB_FLIP_DONE) {
985                 intel_prepare_page_flip(dev, 1);
986                 intel_finish_page_flip_plane(dev, 1);
987         }
988
989         /* check event from PCH */
990         if (de_iir & DE_PCH_EVENT) {
991                 u32 pch_iir = I915_READ(SDEIIR);
992
993                 if (HAS_PCH_CPT(dev))
994                         cpt_irq_handler(dev, pch_iir);
995                 else
996                         ibx_irq_handler(dev, pch_iir);
997
998                 /* should clear PCH hotplug event before clear CPU irq */
999                 I915_WRITE(SDEIIR, pch_iir);
1000         }
1001
1002         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1003                 ironlake_handle_rps_change(dev);
1004
1005         if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
1006                 gen6_queue_rps_work(dev_priv, pm_iir);
1007
1008         I915_WRITE(GTIIR, gt_iir);
1009         I915_WRITE(DEIIR, de_iir);
1010         I915_WRITE(GEN6_PMIIR, pm_iir);
1011
1012 done:
1013         I915_WRITE(DEIER, de_ier);
1014         POSTING_READ(DEIER);
1015         I915_WRITE(SDEIER, sde_ier);
1016         POSTING_READ(SDEIER);
1017
1018         return ret;
1019 }
1020
1021 /**
1022  * i915_error_work_func - do process context error handling work
1023  * @work: work struct
1024  *
1025  * Fire an error uevent so userspace can see that a hang or error
1026  * was detected.
1027  */
1028 static void i915_error_work_func(struct work_struct *work)
1029 {
1030         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1031                                                     work);
1032         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1033                                                     gpu_error);
1034         struct drm_device *dev = dev_priv->dev;
1035         struct intel_ring_buffer *ring;
1036         char *error_event[] = { "ERROR=1", NULL };
1037         char *reset_event[] = { "RESET=1", NULL };
1038         char *reset_done_event[] = { "ERROR=0", NULL };
1039         int i, ret;
1040
1041         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
1042
1043         /*
1044          * Note that there's only one work item which does gpu resets, so we
1045          * need not worry about concurrent gpu resets potentially incrementing
1046          * error->reset_counter twice. We only need to take care of another
1047          * racing irq/hangcheck declaring the gpu dead for a second time. A
1048          * quick check for that is good enough: schedule_work ensures the
1049          * correct ordering between hang detection and this work item, and since
1050          * the reset in-progress bit is only ever set by code outside of this
1051          * work we don't need to worry about any other races.
1052          */
1053         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1054                 DRM_DEBUG_DRIVER("resetting chip\n");
1055                 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1056                                    reset_event);
1057
1058                 ret = i915_reset(dev);
1059
1060                 if (ret == 0) {
1061                         /*
1062                          * After all the gem state is reset, increment the reset
1063                          * counter and wake up everyone waiting for the reset to
1064                          * complete.
1065                          *
1066                          * Since unlock operations are a one-sided barrier only,
1067                          * we need to insert a barrier here to order any seqno
1068                          * updates before
1069                          * the counter increment.
1070                          */
1071                         smp_mb__before_atomic_inc();
1072                         atomic_inc(&dev_priv->gpu_error.reset_counter);
1073
1074                         kobject_uevent_env(&dev->primary->kdev.kobj,
1075                                            KOBJ_CHANGE, reset_done_event);
1076                 } else {
1077                         atomic_set(&error->reset_counter, I915_WEDGED);
1078                 }
1079
1080                 for_each_ring(ring, dev_priv, i)
1081                         wake_up_all(&ring->irq_queue);
1082
1083                 intel_display_handle_reset(dev);
1084
1085                 wake_up_all(&dev_priv->gpu_error.reset_queue);
1086         }
1087 }
1088
1089 /* NB: please notice the memset */
1090 static void i915_get_extra_instdone(struct drm_device *dev,
1091                                     uint32_t *instdone)
1092 {
1093         struct drm_i915_private *dev_priv = dev->dev_private;
1094         memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1095
1096         switch (INTEL_INFO(dev)->gen) {
1097         case 2:
1098         case 3:
1099                 instdone[0] = I915_READ(INSTDONE);
1100                 break;
1101         case 4:
1102         case 5:
1103         case 6:
1104                 instdone[0] = I915_READ(INSTDONE_I965);
1105                 instdone[1] = I915_READ(INSTDONE1);
1106                 break;
1107         default:
1108                 WARN_ONCE(1, "Unsupported platform\n");
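                /* ...and fall through so the gen7 registers are still reported. */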
1109         case 7:
1110                 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1111                 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1112                 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1113                 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1114                 break;
1115         }
1116 }
1117
1118 #ifdef CONFIG_DEBUG_FS
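/* Snapshot the backing pages of an object at error-capture time.  Depending on
 * where the object currently lives, the copy is made through the mappable GTT
 * aperture, directly out of stolen memory, or from the CPU pages (with clflush
 * to avoid reading stale cachelines).
 */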
1119 static struct drm_i915_error_object *
1120 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1121                                struct drm_i915_gem_object *src,
1122                                const int num_pages)
1123 {
1124         struct drm_i915_error_object *dst;
1125         int i;
1126         u32 reloc_offset;
1127
1128         if (src == NULL || src->pages == NULL)
1129                 return NULL;
1130
1131         dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1132         if (dst == NULL)
1133                 return NULL;
1134
1135         reloc_offset = src->gtt_offset;
1136         for (i = 0; i < num_pages; i++) {
1137                 unsigned long flags;
1138                 void *d;
1139
1140                 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1141                 if (d == NULL)
1142                         goto unwind;
1143
1144                 local_irq_save(flags);
1145                 if (reloc_offset < dev_priv->gtt.mappable_end &&
1146                     src->has_global_gtt_mapping) {
1147                         void __iomem *s;
1148
1149                         /* Simply ignore tiling or any overlapping fence.
1150                          * It's part of the error state, and this hopefully
1151                          * captures what the GPU read.
1152                          */
1153
1154                         s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1155                                                      reloc_offset);
1156                         memcpy_fromio(d, s, PAGE_SIZE);
1157                         io_mapping_unmap_atomic(s);
1158                 } else if (src->stolen) {
1159                         unsigned long offset;
1160
1161                         offset = dev_priv->mm.stolen_base;
1162                         offset += src->stolen->start;
1163                         offset += i << PAGE_SHIFT;
1164
1165                         memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
1166                 } else {
1167                         struct page *page;
1168                         void *s;
1169
1170                         page = i915_gem_object_get_page(src, i);
1171
1172                         drm_clflush_pages(&page, 1);
1173
1174                         s = kmap_atomic(page);
1175                         memcpy(d, s, PAGE_SIZE);
1176                         kunmap_atomic(s);
1177
1178                         drm_clflush_pages(&page, 1);
1179                 }
1180                 local_irq_restore(flags);
1181
1182                 dst->pages[i] = d;
1183
1184                 reloc_offset += PAGE_SIZE;
1185         }
1186         dst->page_count = num_pages;
1187         dst->gtt_offset = src->gtt_offset;
1188
1189         return dst;
1190
1191 unwind:
1192         while (i--)
1193                 kfree(dst->pages[i]);
1194         kfree(dst);
1195         return NULL;
1196 }
1197 #define i915_error_object_create(dev_priv, src) \
1198         i915_error_object_create_sized((dev_priv), (src), \
1199                                        (src)->base.size>>PAGE_SHIFT)
1200
1201 static void
1202 i915_error_object_free(struct drm_i915_error_object *obj)
1203 {
1204         int page;
1205
1206         if (obj == NULL)
1207                 return;
1208
1209         for (page = 0; page < obj->page_count; page++)
1210                 kfree(obj->pages[page]);
1211
1212         kfree(obj);
1213 }
1214
1215 void
1216 i915_error_state_free(struct kref *error_ref)
1217 {
1218         struct drm_i915_error_state *error = container_of(error_ref,
1219                                                           typeof(*error), ref);
1220         int i;
1221
1222         for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1223                 i915_error_object_free(error->ring[i].batchbuffer);
1224                 i915_error_object_free(error->ring[i].ringbuffer);
1225                 kfree(error->ring[i].requests);
1226         }
1227
1228         kfree(error->active_bo);
1229         kfree(error->overlay);
1230         kfree(error);
1231 }
1232 static void capture_bo(struct drm_i915_error_buffer *err,
1233                        struct drm_i915_gem_object *obj)
1234 {
1235         err->size = obj->base.size;
1236         err->name = obj->base.name;
1237         err->rseqno = obj->last_read_seqno;
1238         err->wseqno = obj->last_write_seqno;
1239         err->gtt_offset = obj->gtt_offset;
1240         err->read_domains = obj->base.read_domains;
1241         err->write_domain = obj->base.write_domain;
1242         err->fence_reg = obj->fence_reg;
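        /* pinned: 0 = not pinned, 1 = pinned by the kernel, -1 = pinned by userspace */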
1243         err->pinned = 0;
1244         if (obj->pin_count > 0)
1245                 err->pinned = 1;
1246         if (obj->user_pin_count > 0)
1247                 err->pinned = -1;
1248         err->tiling = obj->tiling_mode;
1249         err->dirty = obj->dirty;
1250         err->purgeable = obj->madv != I915_MADV_WILLNEED;
1251         err->ring = obj->ring ? obj->ring->id : -1;
1252         err->cache_level = obj->cache_level;
1253 }
1254
1255 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1256                              int count, struct list_head *head)
1257 {
1258         struct drm_i915_gem_object *obj;
1259         int i = 0;
1260
1261         list_for_each_entry(obj, head, mm_list) {
1262                 capture_bo(err++, obj);
1263                 if (++i == count)
1264                         break;
1265         }
1266
1267         return i;
1268 }
1269
1270 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1271                              int count, struct list_head *head)
1272 {
1273         struct drm_i915_gem_object *obj;
1274         int i = 0;
1275
1276         list_for_each_entry(obj, head, gtt_list) {
1277                 if (obj->pin_count == 0)
1278                         continue;
1279
1280                 capture_bo(err++, obj);
1281                 if (++i == count)
1282                         break;
1283         }
1284
1285         return i;
1286 }
1287
1288 static void i915_gem_record_fences(struct drm_device *dev,
1289                                    struct drm_i915_error_state *error)
1290 {
1291         struct drm_i915_private *dev_priv = dev->dev_private;
1292         int i;
1293
1294         /* Fences */
1295         switch (INTEL_INFO(dev)->gen) {
1296         case 7:
1297         case 6:
1298                 for (i = 0; i < dev_priv->num_fence_regs; i++)
1299                         error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1300                 break;
1301         case 5:
1302         case 4:
1303                 for (i = 0; i < 16; i++)
1304                         error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1305                 break;
1306         case 3:
1307                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1308                         for (i = 0; i < 8; i++)
1309                                 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
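                /* fall through: fences 0-7 live at FENCE_REG_830_0 on gen3 as well */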
1310         case 2:
1311                 for (i = 0; i < 8; i++)
1312                         error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1313                 break;
1314
1315         default:
1316                 BUG();
1317         }
1318 }
1319
1320 static struct drm_i915_error_object *
1321 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1322                              struct intel_ring_buffer *ring)
1323 {
1324         struct drm_i915_gem_object *obj;
1325         u32 seqno;
1326
1327         if (!ring->get_seqno)
1328                 return NULL;
1329
1330         if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
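                /* The broken CS TLB workaround on 830/845 executes batches from
                 * a scratch bo kept in ring->private, so the batch that hung may
                 * be that object rather than anything on the active list.
                 */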
1331                 u32 acthd = I915_READ(ACTHD);
1332
1333                 if (WARN_ON(ring->id != RCS))
1334                         return NULL;
1335
1336                 obj = ring->private;
1337                 if (acthd >= obj->gtt_offset &&
1338                     acthd < obj->gtt_offset + obj->base.size)
1339                         return i915_error_object_create(dev_priv, obj);
1340         }
1341
1342         seqno = ring->get_seqno(ring, false);
1343         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1344                 if (obj->ring != ring)
1345                         continue;
1346
1347                 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1348                         continue;
1349
1350                 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1351                         continue;
1352
1353                 /* We need to copy these to an anonymous buffer as the simplest
1354                  * method to avoid being overwritten by userspace.
1355                  */
1356                 return i915_error_object_create(dev_priv, obj);
1357         }
1358
1359         return NULL;
1360 }
1361
1362 static void i915_record_ring_state(struct drm_device *dev,
1363                                    struct drm_i915_error_state *error,
1364                                    struct intel_ring_buffer *ring)
1365 {
1366         struct drm_i915_private *dev_priv = dev->dev_private;
1367
1368         if (INTEL_INFO(dev)->gen >= 6) {
1369                 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1370                 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1371                 error->semaphore_mboxes[ring->id][0]
1372                         = I915_READ(RING_SYNC_0(ring->mmio_base));
1373                 error->semaphore_mboxes[ring->id][1]
1374                         = I915_READ(RING_SYNC_1(ring->mmio_base));
1375                 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1376                 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1377         }
1378
1379         if (INTEL_INFO(dev)->gen >= 4) {
1380                 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1381                 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1382                 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1383                 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1384                 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1385                 if (ring->id == RCS)
1386                         error->bbaddr = I915_READ64(BB_ADDR);
1387         } else {
1388                 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1389                 error->ipeir[ring->id] = I915_READ(IPEIR);
1390                 error->ipehr[ring->id] = I915_READ(IPEHR);
1391                 error->instdone[ring->id] = I915_READ(INSTDONE);
1392         }
1393
1394         error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1395         error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1396         error->seqno[ring->id] = ring->get_seqno(ring, false);
1397         error->acthd[ring->id] = intel_ring_get_active_head(ring);
1398         error->head[ring->id] = I915_READ_HEAD(ring);
1399         error->tail[ring->id] = I915_READ_TAIL(ring);
1400         error->ctl[ring->id] = I915_READ_CTL(ring);
1401
1402         error->cpu_ring_head[ring->id] = ring->head;
1403         error->cpu_ring_tail[ring->id] = ring->tail;
1404 }
1405
1406
1407 static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1408                                            struct drm_i915_error_state *error,
1409                                            struct drm_i915_error_ring *ering)
1410 {
1411         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1412         struct drm_i915_gem_object *obj;
1413
1414         /* Currently the render ring is the only HW context user */
1415         if (ring->id != RCS || !error->ccid)
1416                 return;
1417
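             /* CCID holds the GTT address of the currently active
              * context object, so find it among the bound objects by
              * matching offsets.
              */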
1418         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
1419                 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1420                         ering->ctx = i915_error_object_create_sized(dev_priv,
1421                                                                     obj, 1);
1422                 }
1423         }
1424 }
1425
1426 static void i915_gem_record_rings(struct drm_device *dev,
1427                                   struct drm_i915_error_state *error)
1428 {
1429         struct drm_i915_private *dev_priv = dev->dev_private;
1430         struct intel_ring_buffer *ring;
1431         struct drm_i915_gem_request *request;
1432         int i, count;
1433
1434         for_each_ring(ring, dev_priv, i) {
1435                 i915_record_ring_state(dev, error, ring);
1436
1437                 error->ring[i].batchbuffer =
1438                         i915_error_first_batchbuffer(dev_priv, ring);
1439
1440                 error->ring[i].ringbuffer =
1441                         i915_error_object_create(dev_priv, ring->obj);
1442
1443
1444                 i915_gem_record_active_context(ring, error, &error->ring[i]);
1445
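                     /* Count the outstanding requests first so that the
                      * request array can be allocated in one go.
                      */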
1446                 count = 0;
1447                 list_for_each_entry(request, &ring->request_list, list)
1448                         count++;
1449
1450                 error->ring[i].num_requests = count;
1451                 error->ring[i].requests =
1452                         kmalloc(count*sizeof(struct drm_i915_error_request),
1453                                 GFP_ATOMIC);
1454                 if (error->ring[i].requests == NULL) {
1455                         error->ring[i].num_requests = 0;
1456                         continue;
1457                 }
1458
1459                 count = 0;
1460                 list_for_each_entry(request, &ring->request_list, list) {
1461                         struct drm_i915_error_request *erq;
1462
1463                         erq = &error->ring[i].requests[count++];
1464                         erq->seqno = request->seqno;
1465                         erq->jiffies = request->emitted_jiffies;
1466                         erq->tail = request->tail;
1467                 }
1468         }
1469 }
1470
1471 /**
1472  * i915_capture_error_state - capture an error record for later analysis
1473  * @dev: drm device
1474  *
1475  * Should be called when an error is detected (either a hang or an error
1476  * interrupt) to capture error state from the time of the error.  Fills
1477  * out a structure which becomes available in debugfs for user level tools
1478  * to pick up.
1479  */
1480 static void i915_capture_error_state(struct drm_device *dev)
1481 {
1482         struct drm_i915_private *dev_priv = dev->dev_private;
1483         struct drm_i915_gem_object *obj;
1484         struct drm_i915_error_state *error;
1485         unsigned long flags;
1486         int i, pipe;
1487
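             /* If an error state has already been captured, keep it;
              * userspace has to consume and clear it before a new one
              * can be recorded.
              */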
1488         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1489         error = dev_priv->gpu_error.first_error;
1490         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1491         if (error)
1492                 return;
1493
1494         /* Account for pipe-specific data like PIPE*STAT */
1495         error = kzalloc(sizeof(*error), GFP_ATOMIC);
1496         if (!error) {
1497                 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1498                 return;
1499         }
1500
1501         DRM_INFO("capturing error event; look for more information in "
1502                  "/sys/kernel/debug/dri/%d/i915_error_state\n",
1503                  dev->primary->index);
1504
1505         kref_init(&error->ref);
1506         error->eir = I915_READ(EIR);
1507         error->pgtbl_er = I915_READ(PGTBL_ER);
1508         if (HAS_HW_CONTEXTS(dev))
1509                 error->ccid = I915_READ(CCID);
1510
1511         if (HAS_PCH_SPLIT(dev))
1512                 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1513         else if (IS_VALLEYVIEW(dev))
1514                 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1515         else if (IS_GEN2(dev))
1516                 error->ier = I915_READ16(IER);
1517         else
1518                 error->ier = I915_READ(IER);
1519
1520         if (INTEL_INFO(dev)->gen >= 6)
1521                 error->derrmr = I915_READ(DERRMR);
1522
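             /* Capture whichever forcewake request register this
              * platform uses.
              */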
1523         if (IS_VALLEYVIEW(dev))
1524                 error->forcewake = I915_READ(FORCEWAKE_VLV);
1525         else if (INTEL_INFO(dev)->gen >= 7)
1526                 error->forcewake = I915_READ(FORCEWAKE_MT);
1527         else if (INTEL_INFO(dev)->gen == 6)
1528                 error->forcewake = I915_READ(FORCEWAKE);
1529
1530         if (!HAS_PCH_SPLIT(dev))
1531                 for_each_pipe(pipe)
1532                         error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1533
1534         if (INTEL_INFO(dev)->gen >= 6) {
1535                 error->error = I915_READ(ERROR_GEN6);
1536                 error->done_reg = I915_READ(DONE_REG);
1537         }
1538
1539         if (INTEL_INFO(dev)->gen == 7)
1540                 error->err_int = I915_READ(GEN7_ERR_INT);
1541
1542         i915_get_extra_instdone(dev, error->extra_instdone);
1543
1544         i915_gem_record_fences(dev, error);
1545         i915_gem_record_rings(dev, error);
1546
1547         /* Record buffers on the active and pinned lists. */
1548         error->active_bo = NULL;
1549         error->pinned_bo = NULL;
1550
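             /* Count active and pinned objects; a single array is
              * allocated below, with the pinned entries stored after
              * the active ones.
              */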
1551         i = 0;
1552         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1553                 i++;
1554         error->active_bo_count = i;
1555         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1556                 if (obj->pin_count)
1557                         i++;
1558         error->pinned_bo_count = i - error->active_bo_count;
1559
1560         error->active_bo = NULL;
1561         error->pinned_bo = NULL;
1562         if (i) {
1563                 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1564                                            GFP_ATOMIC);
1565                 if (error->active_bo)
1566                         error->pinned_bo =
1567                                 error->active_bo + error->active_bo_count;
1568         }
1569
1570         if (error->active_bo)
1571                 error->active_bo_count =
1572                         capture_active_bo(error->active_bo,
1573                                           error->active_bo_count,
1574                                           &dev_priv->mm.active_list);
1575
1576         if (error->pinned_bo)
1577                 error->pinned_bo_count =
1578                         capture_pinned_bo(error->pinned_bo,
1579                                           error->pinned_bo_count,
1580                                           &dev_priv->mm.bound_list);
1581
1582         do_gettimeofday(&error->time);
1583
1584         error->overlay = intel_overlay_capture_error_state(dev);
1585         error->display = intel_display_capture_error_state(dev);
1586
1587         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1588         if (dev_priv->gpu_error.first_error == NULL) {
1589                 dev_priv->gpu_error.first_error = error;
1590                 error = NULL;
1591         }
1592         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1593
1594         if (error)
1595                 i915_error_state_free(&error->ref);
1596 }
1597
1598 void i915_destroy_error_state(struct drm_device *dev)
1599 {
1600         struct drm_i915_private *dev_priv = dev->dev_private;
1601         struct drm_i915_error_state *error;
1602         unsigned long flags;
1603
1604         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1605         error = dev_priv->gpu_error.first_error;
1606         dev_priv->gpu_error.first_error = NULL;
1607         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1608
1609         if (error)
1610                 kref_put(&error->ref, i915_error_state_free);
1611 }
1612 #else
1613 #define i915_capture_error_state(x)
1614 #endif
1615
1616 static void i915_report_and_clear_eir(struct drm_device *dev)
1617 {
1618         struct drm_i915_private *dev_priv = dev->dev_private;
1619         uint32_t instdone[I915_NUM_INSTDONE_REG];
1620         u32 eir = I915_READ(EIR);
1621         int pipe, i;
1622
1623         if (!eir)
1624                 return;
1625
1626         pr_err("render error detected, EIR: 0x%08x\n", eir);
1627
1628         i915_get_extra_instdone(dev, instdone);
1629
1630         if (IS_G4X(dev)) {
1631                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1632                         u32 ipeir = I915_READ(IPEIR_I965);
1633
1634                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1635                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1636                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
1637                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1638                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1639                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1640                         I915_WRITE(IPEIR_I965, ipeir);
1641                         POSTING_READ(IPEIR_I965);
1642                 }
1643                 if (eir & GM45_ERROR_PAGE_TABLE) {
1644                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1645                         pr_err("page table error\n");
1646                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1647                         I915_WRITE(PGTBL_ER, pgtbl_err);
1648                         POSTING_READ(PGTBL_ER);
1649                 }
1650         }
1651
1652         if (!IS_GEN2(dev)) {
1653                 if (eir & I915_ERROR_PAGE_TABLE) {
1654                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1655                         pr_err("page table error\n");
1656                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1657                         I915_WRITE(PGTBL_ER, pgtbl_err);
1658                         POSTING_READ(PGTBL_ER);
1659                 }
1660         }
1661
1662         if (eir & I915_ERROR_MEMORY_REFRESH) {
1663                 pr_err("memory refresh error:\n");
1664                 for_each_pipe(pipe)
1665                         pr_err("pipe %c stat: 0x%08x\n",
1666                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1667                 /* pipestat has already been acked */
1668         }
1669         if (eir & I915_ERROR_INSTRUCTION) {
1670                 pr_err("instruction error\n");
1671                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1672                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1673                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1674                 if (INTEL_INFO(dev)->gen < 4) {
1675                         u32 ipeir = I915_READ(IPEIR);
1676
1677                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1678                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1679                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
1680                         I915_WRITE(IPEIR, ipeir);
1681                         POSTING_READ(IPEIR);
1682                 } else {
1683                         u32 ipeir = I915_READ(IPEIR_I965);
1684
1685                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1686                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1687                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1688                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1689                         I915_WRITE(IPEIR_I965, ipeir);
1690                         POSTING_READ(IPEIR_I965);
1691                 }
1692         }
1693
1694         I915_WRITE(EIR, eir);
1695         POSTING_READ(EIR);
1696         eir = I915_READ(EIR);
1697         if (eir) {
1698                 /*
1699                  * some errors might have become stuck,
1700                  * mask them.
1701                  */
1702                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1703                 I915_WRITE(EMR, I915_READ(EMR) | eir);
1704                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1705         }
1706 }
1707
1708 /**
1709  * i915_handle_error - handle an error interrupt
1710  * @dev: drm device
1711  *
1712  * Do some basic checking of register state at error interrupt time and
1713  * dump it to the syslog.  Also call i915_capture_error_state() to make
1714  * sure we get a record and make it available in debugfs.  Fire a uevent
1715  * so userspace knows something bad happened (should trigger collection
1716  * of a ring dump etc.).
1717  */
1718 void i915_handle_error(struct drm_device *dev, bool wedged)
1719 {
1720         struct drm_i915_private *dev_priv = dev->dev_private;
1721         struct intel_ring_buffer *ring;
1722         int i;
1723
1724         i915_capture_error_state(dev);
1725         i915_report_and_clear_eir(dev);
1726
1727         if (wedged) {
1728                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1729                                 &dev_priv->gpu_error.reset_counter);
1730
1731                 /*
1732                  * Wakeup waiting processes so that the reset work item
1733                  * doesn't deadlock trying to grab various locks.
1734                  */
1735                 for_each_ring(ring, dev_priv, i)
1736                         wake_up_all(&ring->irq_queue);
1737         }
1738
1739         queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
1740 }
1741
1742 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1743 {
1744         drm_i915_private_t *dev_priv = dev->dev_private;
1745         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1746         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1747         struct drm_i915_gem_object *obj;
1748         struct intel_unpin_work *work;
1749         unsigned long flags;
1750         bool stall_detected;
1751
1752         /* Ignore early vblank irqs */
1753         if (intel_crtc == NULL)
1754                 return;
1755
1756         spin_lock_irqsave(&dev->event_lock, flags);
1757         work = intel_crtc->unpin_work;
1758
1759         if (work == NULL ||
1760             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1761             !work->enable_stall_check) {
1762                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1763                 spin_unlock_irqrestore(&dev->event_lock, flags);
1764                 return;
1765         }
1766
1767         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1768         obj = work->pending_flip_obj;
1769         if (INTEL_INFO(dev)->gen >= 4) {
1770                 int dspsurf = DSPSURF(intel_crtc->plane);
1771                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1772                                         obj->gtt_offset;
1773         } else {
1774                 int dspaddr = DSPADDR(intel_crtc->plane);
1775                 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1776                                                         crtc->y * crtc->fb->pitches[0] +
1777                                                         crtc->x * crtc->fb->bits_per_pixel/8);
1778         }
1779
1780         spin_unlock_irqrestore(&dev->event_lock, flags);
1781
1782         if (stall_detected) {
1783                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1784                 intel_prepare_page_flip(dev, intel_crtc->plane);
1785         }
1786 }
1787
1788 /* Called from drm generic code, passed 'crtc' which
1789  * we use as a pipe index
1790  */
1791 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1792 {
1793         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1794         unsigned long irqflags;
1795
1796         if (!i915_pipe_enabled(dev, pipe))
1797                 return -EINVAL;
1798
1799         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
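             /* gen4 and newer use the start-of-vblank pipestat event;
              * older parts use the legacy vblank interrupt enable.
              */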
1800         if (INTEL_INFO(dev)->gen >= 4)
1801                 i915_enable_pipestat(dev_priv, pipe,
1802                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1803         else
1804                 i915_enable_pipestat(dev_priv, pipe,
1805                                      PIPE_VBLANK_INTERRUPT_ENABLE);
1806
1807         /* maintain vblank delivery even in deep C-states */
1808         if (dev_priv->info->gen == 3)
1809                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1810         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1811
1812         return 0;
1813 }
1814
1815 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1816 {
1817         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1818         unsigned long irqflags;
1819
1820         if (!i915_pipe_enabled(dev, pipe))
1821                 return -EINVAL;
1822
1823         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1824         ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1825                                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1826         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1827
1828         return 0;
1829 }
1830
1831 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1832 {
1833         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1834         unsigned long irqflags;
1835
1836         if (!i915_pipe_enabled(dev, pipe))
1837                 return -EINVAL;
1838
1839         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1840         ironlake_enable_display_irq(dev_priv,
1841                                     DE_PIPEA_VBLANK_IVB << (5 * pipe));
1842         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1843
1844         return 0;
1845 }
1846
1847 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1848 {
1849         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1850         unsigned long irqflags;
1851         u32 imr;
1852
1853         if (!i915_pipe_enabled(dev, pipe))
1854                 return -EINVAL;
1855
1856         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
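             /* On VLV the vblank interrupt has to be unmasked in
              * VLV_IMR as well as enabled via PIPESTAT.
              */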
1857         imr = I915_READ(VLV_IMR);
1858         if (pipe == 0)
1859                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1860         else
1861                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1862         I915_WRITE(VLV_IMR, imr);
1863         i915_enable_pipestat(dev_priv, pipe,
1864                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
1865         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1866
1867         return 0;
1868 }
1869
1870 /* Called from drm generic code, passed 'crtc' which
1871  * we use as a pipe index
1872  */
1873 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1874 {
1875         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1876         unsigned long irqflags;
1877
1878         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1879         if (dev_priv->info->gen == 3)
1880                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1881
1882         i915_disable_pipestat(dev_priv, pipe,
1883                               PIPE_VBLANK_INTERRUPT_ENABLE |
1884                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1885         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1886 }
1887
1888 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1889 {
1890         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1891         unsigned long irqflags;
1892
1893         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1894         ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1895                                      DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1896         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1897 }
1898
1899 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1900 {
1901         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1902         unsigned long irqflags;
1903
1904         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1905         ironlake_disable_display_irq(dev_priv,
1906                                      DE_PIPEA_VBLANK_IVB << (pipe * 5));
1907         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1908 }
1909
1910 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1911 {
1912         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1913         unsigned long irqflags;
1914         u32 imr;
1915
1916         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1917         i915_disable_pipestat(dev_priv, pipe,
1918                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1919         imr = I915_READ(VLV_IMR);
1920         if (pipe == 0)
1921                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1922         else
1923                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1924         I915_WRITE(VLV_IMR, imr);
1925         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1926 }
1927
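/* Seqno of the most recently queued request on this ring; the request
 * list is kept in submission order, so the newest entry is at the tail.
 */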
1928 static u32
1929 ring_last_seqno(struct intel_ring_buffer *ring)
1930 {
1931         return list_entry(ring->request_list.prev,
1932                           struct drm_i915_gem_request, list)->seqno;
1933 }
1934
1935 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1936 {
1937         if (list_empty(&ring->request_list) ||
1938             i915_seqno_passed(ring->get_seqno(ring, false),
1939                               ring_last_seqno(ring))) {
1940                 /* Issue a wake-up to catch stuck h/w. */
1941                 if (waitqueue_active(&ring->irq_queue)) {
1942                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1943                                   ring->name);
1944                         wake_up_all(&ring->irq_queue);
1945                         *err = true;
1946                 }
1947                 return true;
1948         }
1949         return false;
1950 }
1951
1952 static bool semaphore_passed(struct intel_ring_buffer *ring)
1953 {
1954         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1955         u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
1956         struct intel_ring_buffer *signaller;
1957         u32 cmd, ipehr, acthd_min;
1958
1959         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1960         if ((ipehr & ~(0x3 << 16)) !=
1961             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1962                 return false;
1963
1964         /* ACTHD is likely pointing to the dword after the actual command,
1965          * so scan backwards until we find the MBOX.
1966          */
1967         acthd_min = max((int)acthd - 3 * 4, 0);
1968         do {
1969                 cmd = ioread32(ring->virtual_start + acthd);
1970                 if (cmd == ipehr)
1971                         break;
1972
1973                 acthd -= 4;
1974                 if (acthd < acthd_min)
1975                         return false;
1976         } while (1);
1977
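             /* Bit 17 of the MBOX command selects which of the other
              * two rings is the signaller, and the dword following the
              * command holds the seqno value being waited on.
              */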
1978         signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
1979         return i915_seqno_passed(signaller->get_seqno(signaller, false),
1980                                  ioread32(ring->virtual_start+acthd+4)+1);
1981 }
1982
1983 static bool kick_ring(struct intel_ring_buffer *ring)
1984 {
1985         struct drm_device *dev = ring->dev;
1986         struct drm_i915_private *dev_priv = dev->dev_private;
1987         u32 tmp = I915_READ_CTL(ring);
1988         if (tmp & RING_WAIT) {
1989                 DRM_ERROR("Kicking stuck wait on %s\n",
1990                           ring->name);
1991                 I915_WRITE_CTL(ring, tmp);
1992                 return true;
1993         }
1994
1995         if (INTEL_INFO(dev)->gen >= 6 &&
1996             tmp & RING_WAIT_SEMAPHORE &&
1997             semaphore_passed(ring)) {
1998                 DRM_ERROR("Kicking stuck semaphore on %s\n",
1999                           ring->name);
2000                 I915_WRITE_CTL(ring, tmp);
2001                 return true;
2002         }
2003         return false;
2004 }
2005
2006 static bool i915_hangcheck_hung(struct drm_device *dev)
2007 {
2008         drm_i915_private_t *dev_priv = dev->dev_private;
2009
2010         if (dev_priv->gpu_error.hangcheck_count++ > 1) {
2011                 bool hung = true;
2012
2013                 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
2014                 i915_handle_error(dev, true);
2015
2016                 if (!IS_GEN2(dev)) {
2017                         struct intel_ring_buffer *ring;
2018                         int i;
2019
2020                         /* Is the chip hanging on a WAIT_FOR_EVENT?
2021                          * If so we can simply poke the RB_WAIT bit
2022                          * and break the hang. This should work on
2023                          * all but the second generation chipsets.
2024                          */
2025                         for_each_ring(ring, dev_priv, i)
2026                                 hung &= !kick_ring(ring);
2027                 }
2028
2029                 return hung;
2030         }
2031
2032         return false;
2033 }
2034
2035 /**
2036  * This is called when the chip hasn't reported back with completed
2037  * batchbuffers in a long time. The first time this is called we simply record
2038  * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
2039  * again, we assume the chip is wedged and try to fix it.
2040  */
2041 void i915_hangcheck_elapsed(unsigned long data)
2042 {
2043         struct drm_device *dev = (struct drm_device *)data;
2044         drm_i915_private_t *dev_priv = dev->dev_private;
2045         uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
2046         struct intel_ring_buffer *ring;
2047         bool err = false, idle;
2048         int i;
2049
2050         if (!i915_enable_hangcheck)
2051                 return;
2052
2053         memset(acthd, 0, sizeof(acthd));
2054         idle = true;
2055         for_each_ring(ring, dev_priv, i) {
2056                 idle &= i915_hangcheck_ring_idle(ring, &err);
2057                 acthd[i] = intel_ring_get_active_head(ring);
2058         }
2059
2060         /* If all work is done then ACTHD clearly hasn't advanced. */
2061         if (idle) {
2062                 if (err) {
2063                         if (i915_hangcheck_hung(dev))
2064                                 return;
2065
2066                         goto repeat;
2067                 }
2068
2069                 dev_priv->gpu_error.hangcheck_count = 0;
2070                 return;
2071         }
2072
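             /* Compare ACTHD and the extended INSTDONE registers with
              * the previous sample; if nothing has moved, the GPU has
              * made no progress since the last hangcheck.
              */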
2073         i915_get_extra_instdone(dev, instdone);
2074         if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
2075                    sizeof(acthd)) == 0 &&
2076             memcmp(dev_priv->gpu_error.prev_instdone, instdone,
2077                    sizeof(instdone)) == 0) {
2078                 if (i915_hangcheck_hung(dev))
2079                         return;
2080         } else {
2081                 dev_priv->gpu_error.hangcheck_count = 0;
2082
2083                 memcpy(dev_priv->gpu_error.last_acthd, acthd,
2084                        sizeof(acthd));
2085                 memcpy(dev_priv->gpu_error.prev_instdone, instdone,
2086                        sizeof(instdone));
2087         }
2088
2089 repeat:
2090         /* Reset timer in case the chip hangs without another request being added */
2091         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2092                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2093 }
2094
2095 /* drm_dma.h hooks
2096 */
2097 static void ironlake_irq_preinstall(struct drm_device *dev)
2098 {
2099         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2100
2101         atomic_set(&dev_priv->irq_received, 0);
2102
2103         I915_WRITE(HWSTAM, 0xeffe);
2104
2105         /* XXX hotplug from PCH */
2106
2107         I915_WRITE(DEIMR, 0xffffffff);
2108         I915_WRITE(DEIER, 0x0);
2109         POSTING_READ(DEIER);
2110
2111         /* and GT */
2112         I915_WRITE(GTIMR, 0xffffffff);
2113         I915_WRITE(GTIER, 0x0);
2114         POSTING_READ(GTIER);
2115
2116         if (HAS_PCH_NOP(dev))
2117                 return;
2118
2119         /* south display irq */
2120         I915_WRITE(SDEIMR, 0xffffffff);
2121         /*
2122          * SDEIER is also touched by the interrupt handler to work around missed
2123          * PCH interrupts. Hence we can't update it after the interrupt handler
2124          * is enabled - instead we unconditionally enable all PCH interrupt
2125          * sources here, but then only unmask them as needed with SDEIMR.
2126          */
2127         I915_WRITE(SDEIER, 0xffffffff);
2128         POSTING_READ(SDEIER);
2129 }
2130
2131 static void valleyview_irq_preinstall(struct drm_device *dev)
2132 {
2133         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2134         int pipe;
2135
2136         atomic_set(&dev_priv->irq_received, 0);
2137
2138         /* VLV magic */
2139         I915_WRITE(VLV_IMR, 0);
2140         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2141         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2142         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2143
2144         /* and GT */
2145         I915_WRITE(GTIIR, I915_READ(GTIIR));
2146         I915_WRITE(GTIIR, I915_READ(GTIIR));
2147         I915_WRITE(GTIMR, 0xffffffff);
2148         I915_WRITE(GTIER, 0x0);
2149         POSTING_READ(GTIER);
2150
2151         I915_WRITE(DPINVGTT, 0xff);
2152
2153         I915_WRITE(PORT_HOTPLUG_EN, 0);
2154         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2155         for_each_pipe(pipe)
2156                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2157         I915_WRITE(VLV_IIR, 0xffffffff);
2158         I915_WRITE(VLV_IMR, 0xffffffff);
2159         I915_WRITE(VLV_IER, 0x0);
2160         POSTING_READ(VLV_IER);
2161 }
2162
2163 static void ibx_hpd_irq_setup(struct drm_device *dev)
2164 {
2165         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2166         struct drm_mode_config *mode_config = &dev->mode_config;
2167         struct intel_encoder *intel_encoder;
2168         u32 mask = ~I915_READ(SDEIMR);
2169         u32 hotplug;
2170
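             /* Only unmask hotplug interrupts for pins currently marked
              * enabled; pins can be marked disabled elsewhere (e.g. by
              * the hotplug storm handling).
              */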
2171         if (HAS_PCH_IBX(dev)) {
2172                 mask &= ~SDE_HOTPLUG_MASK;
2173                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2174                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2175                                 mask |= hpd_ibx[intel_encoder->hpd_pin];
2176         } else {
2177                 mask &= ~SDE_HOTPLUG_MASK_CPT;
2178                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2179                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2180                                 mask |= hpd_cpt[intel_encoder->hpd_pin];
2181         }
2182
2183         I915_WRITE(SDEIMR, ~mask);
2184
2185         /*
2186          * Enable digital hotplug on the PCH, and configure the DP short pulse
2187          * duration to 2ms (which is the minimum in the DisplayPort spec)
2188          *
2189          * This register is the same on all known PCH chips.
2190          */
2191         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2192         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2193         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2194         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2195         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2196         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2197 }
2198
2199 static void ibx_irq_postinstall(struct drm_device *dev)
2200 {
2201         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2202         u32 mask;
2203
2204         if (HAS_PCH_IBX(dev))
2205                 mask = SDE_GMBUS | SDE_AUX_MASK;
2206         else
2207                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2208
2209         if (HAS_PCH_NOP(dev))
2210                 return;
2211
2212         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2213         I915_WRITE(SDEIMR, ~mask);
2214 }
2215
2216 static int ironlake_irq_postinstall(struct drm_device *dev)
2217 {
2218         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2219         /* enable the interrupts we always want on */
2220         u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2221                            DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2222                            DE_AUX_CHANNEL_A;
2223         u32 render_irqs;
2224
2225         dev_priv->irq_mask = ~display_mask;
2226
2227         /* these should always be able to generate an irq */
2228         I915_WRITE(DEIIR, I915_READ(DEIIR));
2229         I915_WRITE(DEIMR, dev_priv->irq_mask);
2230         I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
2231         POSTING_READ(DEIER);
2232
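             /* All GT interrupts start out masked here; GTIER below
              * still enables the sources, and individual bits can be
              * unmasked later as needed.
              */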
2233         dev_priv->gt_irq_mask = ~0;
2234
2235         I915_WRITE(GTIIR, I915_READ(GTIIR));
2236         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2237
2238         if (IS_GEN6(dev))
2239                 render_irqs =
2240                         GT_USER_INTERRUPT |
2241                         GEN6_BSD_USER_INTERRUPT |
2242                         GEN6_BLITTER_USER_INTERRUPT;
2243         else
2244                 render_irqs =
2245                         GT_USER_INTERRUPT |
2246                         GT_PIPE_NOTIFY |
2247                         GT_BSD_USER_INTERRUPT;
2248         I915_WRITE(GTIER, render_irqs);
2249         POSTING_READ(GTIER);
2250
2251         ibx_irq_postinstall(dev);
2252
2253         if (IS_IRONLAKE_M(dev)) {
2254                 /* Clear & enable PCU event interrupts */
2255                 I915_WRITE(DEIIR, DE_PCU_EVENT);
2256                 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
2257                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2258         }
2259
2260         return 0;
2261 }
2262
2263 static int ivybridge_irq_postinstall(struct drm_device *dev)
2264 {
2265         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2266         /* enable the interrupts we always want on */
2267         u32 display_mask =
2268                 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2269                 DE_PLANEC_FLIP_DONE_IVB |
2270                 DE_PLANEB_FLIP_DONE_IVB |
2271                 DE_PLANEA_FLIP_DONE_IVB |
2272                 DE_AUX_CHANNEL_A_IVB;
2273         u32 render_irqs;
2274
2275         dev_priv->irq_mask = ~display_mask;
2276
2277         /* these should always be able to generate an irq */
2278         I915_WRITE(DEIIR, I915_READ(DEIIR));
2279         I915_WRITE(DEIMR, dev_priv->irq_mask);
2280         I915_WRITE(DEIER,
2281                    display_mask |
2282                    DE_PIPEC_VBLANK_IVB |
2283                    DE_PIPEB_VBLANK_IVB |
2284                    DE_PIPEA_VBLANK_IVB);
2285         POSTING_READ(DEIER);
2286
2287         dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2288
2289         I915_WRITE(GTIIR, I915_READ(GTIIR));
2290         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2291
2292         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2293                 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2294         I915_WRITE(GTIER, render_irqs);
2295         POSTING_READ(GTIER);
2296
2297         ibx_irq_postinstall(dev);
2298
2299         return 0;
2300 }
2301
2302 static int valleyview_irq_postinstall(struct drm_device *dev)
2303 {
2304         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2305         u32 enable_mask;
2306         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2307         u32 render_irqs;
2308         u16 msid;
2309
2310         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2311         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2312                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2313                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2314                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2315
2316         /*
2317          * Leave vblank interrupts masked initially.  Enable/disable will
2318          * toggle them based on usage.
2319          */
2320         dev_priv->irq_mask = (~enable_mask) |
2321                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2322                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2323
2324         /* Hack for broken MSIs on VLV */
2325         pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2326         pci_read_config_word(dev->pdev, 0x98, &msid);
2327         msid &= 0xff; /* mask out delivery bits */
2328         msid |= (1<<14);
2329         pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2330
2331         I915_WRITE(PORT_HOTPLUG_EN, 0);
2332         POSTING_READ(PORT_HOTPLUG_EN);
2333
2334         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2335         I915_WRITE(VLV_IER, enable_mask);
2336         I915_WRITE(VLV_IIR, 0xffffffff);
2337         I915_WRITE(PIPESTAT(0), 0xffff);
2338         I915_WRITE(PIPESTAT(1), 0xffff);
2339         POSTING_READ(VLV_IER);
2340
2341         i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2342         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2343         i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2344
2345         I915_WRITE(VLV_IIR, 0xffffffff);
2346         I915_WRITE(VLV_IIR, 0xffffffff);
2347
2348         I915_WRITE(GTIIR, I915_READ(GTIIR));
2349         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2350
2351         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2352                 GEN6_BLITTER_USER_INTERRUPT;
2353         I915_WRITE(GTIER, render_irqs);
2354         POSTING_READ(GTIER);
2355
2356         /* ack & enable invalid PTE error interrupts */
2357 #if 0 /* FIXME: add support to irq handler for checking these bits */
2358         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2359         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2360 #endif
2361
2362         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2363
2364         return 0;
2365 }
2366
2367 static void valleyview_irq_uninstall(struct drm_device *dev)
2368 {
2369         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2370         int pipe;
2371
2372         if (!dev_priv)
2373                 return;
2374
2375         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2376
2377         for_each_pipe(pipe)
2378                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2379
2380         I915_WRITE(HWSTAM, 0xffffffff);
2381         I915_WRITE(PORT_HOTPLUG_EN, 0);
2382         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2383         for_each_pipe(pipe)
2384                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2385         I915_WRITE(VLV_IIR, 0xffffffff);
2386         I915_WRITE(VLV_IMR, 0xffffffff);
2387         I915_WRITE(VLV_IER, 0x0);
2388         POSTING_READ(VLV_IER);
2389 }
2390
2391 static void ironlake_irq_uninstall(struct drm_device *dev)
2392 {
2393         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2394
2395         if (!dev_priv)
2396                 return;
2397
2398         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2399
2400         I915_WRITE(HWSTAM, 0xffffffff);
2401
2402         I915_WRITE(DEIMR, 0xffffffff);
2403         I915_WRITE(DEIER, 0x0);
2404         I915_WRITE(DEIIR, I915_READ(DEIIR));
2405
2406         I915_WRITE(GTIMR, 0xffffffff);
2407         I915_WRITE(GTIER, 0x0);
2408         I915_WRITE(GTIIR, I915_READ(GTIIR));
2409
2410         if (HAS_PCH_NOP(dev))
2411                 return;
2412
2413         I915_WRITE(SDEIMR, 0xffffffff);
2414         I915_WRITE(SDEIER, 0x0);
2415         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2416 }
2417
2418 static void i8xx_irq_preinstall(struct drm_device * dev)
2419 {
2420         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2421         int pipe;
2422
2423         atomic_set(&dev_priv->irq_received, 0);
2424
2425         for_each_pipe(pipe)
2426                 I915_WRITE(PIPESTAT(pipe), 0);
2427         I915_WRITE16(IMR, 0xffff);
2428         I915_WRITE16(IER, 0x0);
2429         POSTING_READ16(IER);
2430 }
2431
2432 static int i8xx_irq_postinstall(struct drm_device *dev)
2433 {
2434         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2435
2436         I915_WRITE16(EMR,
2437                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2438
2439         /* Unmask the interrupts that we always want on. */
2440         dev_priv->irq_mask =
2441                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2442                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2443                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2444                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2445                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2446         I915_WRITE16(IMR, dev_priv->irq_mask);
2447
2448         I915_WRITE16(IER,
2449                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2450                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2451                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2452                      I915_USER_INTERRUPT);
2453         POSTING_READ16(IER);
2454
2455         return 0;
2456 }
2457
2458 /*
2459  * Returns true when a page flip has completed.
2460  */
2461 static bool i8xx_handle_vblank(struct drm_device *dev,
2462                                int pipe, u16 iir)
2463 {
2464         drm_i915_private_t *dev_priv = dev->dev_private;
2465         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2466
2467         if (!drm_handle_vblank(dev, pipe))
2468                 return false;
2469
2470         if ((iir & flip_pending) == 0)
2471                 return false;
2472
2473         intel_prepare_page_flip(dev, pipe);
2474
2475         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2476          * to '0' on the following vblank, i.e. IIR has the Pendingflip
2477          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2478          * the flip is completed (no longer pending). Since this doesn't raise
2479          * an interrupt per se, we watch for the change at vblank.
2480          */
2481         if (I915_READ16(ISR) & flip_pending)
2482                 return false;
2483
2484         intel_finish_page_flip(dev, pipe);
2485
2486         return true;
2487 }
2488
2489 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2490 {
2491         struct drm_device *dev = (struct drm_device *) arg;
2492         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2493         u16 iir, new_iir;
2494         u32 pipe_stats[2];
2495         unsigned long irqflags;
2496         int irq_received;
2497         int pipe;
2498         u16 flip_mask =
2499                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2500                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2501
2502         atomic_inc(&dev_priv->irq_received);
2503
2504         iir = I915_READ16(IIR);
2505         if (iir == 0)
2506                 return IRQ_NONE;
2507
2508         while (iir & ~flip_mask) {
2509                 /* Can't rely on pipestat interrupt bit in iir as it might
2510                  * have been cleared after the pipestat interrupt was received.
2511                  * It doesn't set the bit in iir again, but it still produces
2512                  * interrupts (for non-MSI).
2513                  */
2514                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2515                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2516                         i915_handle_error(dev, false);
2517
2518                 for_each_pipe(pipe) {
2519                         int reg = PIPESTAT(pipe);
2520                         pipe_stats[pipe] = I915_READ(reg);
2521
2522                         /*
2523                          * Clear the PIPE*STAT regs before the IIR
2524                          */
2525                         if (pipe_stats[pipe] & 0x8000ffff) {
2526                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2527                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2528                                                          pipe_name(pipe));
2529                                 I915_WRITE(reg, pipe_stats[pipe]);
2530                                 irq_received = 1;
2531                         }
2532                 }
2533                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2534
2535                 I915_WRITE16(IIR, iir & ~flip_mask);
2536                 new_iir = I915_READ16(IIR); /* Flush posted writes */
2537
2538                 i915_update_dri1_breadcrumb(dev);
2539
2540                 if (iir & I915_USER_INTERRUPT)
2541                         notify_ring(dev, &dev_priv->ring[RCS]);
2542
2543                 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2544                     i8xx_handle_vblank(dev, 0, iir))
2545                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
2546
2547                 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2548                     i8xx_handle_vblank(dev, 1, iir))
2549                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
2550
2551                 iir = new_iir;
2552         }
2553
2554         return IRQ_HANDLED;
2555 }
2556
2557 static void i8xx_irq_uninstall(struct drm_device * dev)
2558 {
2559         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2560         int pipe;
2561
2562         for_each_pipe(pipe) {
2563                 /* Clear enable bits; then clear status bits */
2564                 I915_WRITE(PIPESTAT(pipe), 0);
2565                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2566         }
2567         I915_WRITE16(IMR, 0xffff);
2568         I915_WRITE16(IER, 0x0);
2569         I915_WRITE16(IIR, I915_READ16(IIR));
2570 }
2571
2572 static void i915_irq_preinstall(struct drm_device * dev)
2573 {
2574         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2575         int pipe;
2576
2577         atomic_set(&dev_priv->irq_received, 0);
2578
2579         if (I915_HAS_HOTPLUG(dev)) {
2580                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2581                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2582         }
2583
2584         I915_WRITE16(HWSTAM, 0xeffe);
2585         for_each_pipe(pipe)
2586                 I915_WRITE(PIPESTAT(pipe), 0);
2587         I915_WRITE(IMR, 0xffffffff);
2588         I915_WRITE(IER, 0x0);
2589         POSTING_READ(IER);
2590 }
2591
2592 static int i915_irq_postinstall(struct drm_device *dev)
2593 {
2594         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2595         u32 enable_mask;
2596
2597         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2598
2599         /* Unmask the interrupts that we always want on. */
2600         dev_priv->irq_mask =
2601                 ~(I915_ASLE_INTERRUPT |
2602                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2603                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2604                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2605                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2606                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2607
2608         enable_mask =
2609                 I915_ASLE_INTERRUPT |
2610                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2611                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2612                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2613                 I915_USER_INTERRUPT;
2614
2615         if (I915_HAS_HOTPLUG(dev)) {
2616                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2617                 POSTING_READ(PORT_HOTPLUG_EN);
2618
2619                 /* Enable in IER... */
2620                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2621                 /* and unmask in IMR */
2622                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2623         }
2624
2625         I915_WRITE(IMR, dev_priv->irq_mask);
2626         I915_WRITE(IER, enable_mask);
2627         POSTING_READ(IER);
2628
2629         intel_opregion_enable_asle(dev);
2630
2631         return 0;
2632 }
2633
2634 /*
2635  * Returns true when a page flip has completed.
2636  */
2637 static bool i915_handle_vblank(struct drm_device *dev,
2638                                int plane, int pipe, u32 iir)
2639 {
2640         drm_i915_private_t *dev_priv = dev->dev_private;
2641         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2642
2643         if (!drm_handle_vblank(dev, pipe))
2644                 return false;
2645
2646         if ((iir & flip_pending) == 0)
2647                 return false;
2648
2649         intel_prepare_page_flip(dev, plane);
2650
2651         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2652          * to '0' on the following vblank, i.e. IIR has the Pendingflip
2653          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2654          * the flip is completed (no longer pending). Since this doesn't raise
2655          * an interrupt per se, we watch for the change at vblank.
2656          */
2657         if (I915_READ(ISR) & flip_pending)
2658                 return false;
2659
2660         intel_finish_page_flip(dev, pipe);
2661
2662         return true;
2663 }
2664
2665 static irqreturn_t i915_irq_handler(int irq, void *arg)
2666 {
2667         struct drm_device *dev = (struct drm_device *) arg;
2668         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2669         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2670         unsigned long irqflags;
2671         u32 flip_mask =
2672                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2673                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2674         int pipe, ret = IRQ_NONE;
2675
2676         atomic_inc(&dev_priv->irq_received);
2677
2678         iir = I915_READ(IIR);
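             /* Keep looping while IIR reports anything beyond the
              * flip-pending bits, which are instead handled from the
              * vblank pipestat events below.
              */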
2679         do {
2680                 bool irq_received = (iir & ~flip_mask) != 0;
2681                 bool blc_event = false;
2682
2683                 /* Can't rely on pipestat interrupt bit in iir as it might
2684                  * have been cleared after the pipestat interrupt was received.
2685                  * It doesn't set the bit in iir again, but it still produces
2686                  * interrupts (for non-MSI).
2687                  */
2688                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2689                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2690                         i915_handle_error(dev, false);
2691
2692                 for_each_pipe(pipe) {
2693                         int reg = PIPESTAT(pipe);
2694                         pipe_stats[pipe] = I915_READ(reg);
2695
2696                         /* Clear the PIPE*STAT regs before the IIR */
2697                         if (pipe_stats[pipe] & 0x8000ffff) {
2698                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2699                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2700                                                          pipe_name(pipe));
2701                                 I915_WRITE(reg, pipe_stats[pipe]);
2702                                 irq_received = true;
2703                         }
2704                 }
2705                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2706
2707                 if (!irq_received)
2708                         break;
2709
2710                 /* Consume port.  Then clear IIR or we'll miss events */
2711                 if ((I915_HAS_HOTPLUG(dev)) &&
2712                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2713                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2714                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2715
2716                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2717                                   hotplug_status);
2718                         if (hotplug_trigger) {
2719                                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
2720                                         i915_hpd_irq_setup(dev);
2721                                 queue_work(dev_priv->wq,
2722                                            &dev_priv->hotplug_work);
2723                         }
2724                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2725                         POSTING_READ(PORT_HOTPLUG_STAT);
2726                 }
2727
2728                 I915_WRITE(IIR, iir & ~flip_mask);
2729                 new_iir = I915_READ(IIR); /* Flush posted writes */
2730
2731                 if (iir & I915_USER_INTERRUPT)
2732                         notify_ring(dev, &dev_priv->ring[RCS]);
2733
2734                 for_each_pipe(pipe) {
2735                         int plane = pipe;
2736                         if (IS_MOBILE(dev))
2737                                 plane = !plane;
2738
2739                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2740                             i915_handle_vblank(dev, plane, pipe, iir))
2741                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2742
2743                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2744                                 blc_event = true;
2745                 }
2746
2747                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2748                         intel_opregion_asle_intr(dev);
2749
2750                 /* With MSI, interrupts are only generated when iir
2751                  * transitions from zero to nonzero.  If another bit got
2752                  * set while we were handling the existing iir bits, then
2753                  * we would never get another interrupt.
2754                  *
2755                  * This is fine on non-MSI as well, as if we hit this path
2756                  * we avoid exiting the interrupt handler only to generate
2757                  * another one.
2758                  *
2759                  * Note that for MSI this could cause a stray interrupt report
2760                  * if an interrupt landed in the time between writing IIR and
2761                  * the posting read.  This should be rare enough to never
2762                  * trigger the 99% of 100,000 interrupts test for disabling
2763                  * stray interrupts.
2764                  */
2765                 ret = IRQ_HANDLED;
2766                 iir = new_iir;
2767         } while (iir & ~flip_mask);
2768
2769         i915_update_dri1_breadcrumb(dev);
2770
2771         return ret;
2772 }
2773
2774 static void i915_irq_uninstall(struct drm_device * dev)
2775 {
2776         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2777         int pipe;
2778
2779         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2780
2781         if (I915_HAS_HOTPLUG(dev)) {
2782                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2783                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2784         }
2785
2786         I915_WRITE16(HWSTAM, 0xffff);
2787         for_each_pipe(pipe) {
2788                 /* Clear enable bits; then clear status bits */
2789                 I915_WRITE(PIPESTAT(pipe), 0);
2790                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2791         }
2792         I915_WRITE(IMR, 0xffffffff);
2793         I915_WRITE(IER, 0x0);
2794
2795         I915_WRITE(IIR, I915_READ(IIR));
2796 }
2797
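/*
 * Preinstall for the gen4/g4x (i965) path.  The DRM core calls this before
 * the interrupt line is requested, so the only job is to quiesce the
 * hardware: disable and ack hotplug, mask (nearly) everything in HWSTAM and
 * all of IMR, clear the per-pipe status registers and turn IER off, with a
 * posting read to flush the writes.
 */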
2798 static void i965_irq_preinstall(struct drm_device *dev)
2799 {
2800         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2801         int pipe;
2802
2803         atomic_set(&dev_priv->irq_received, 0);
2804
2805         I915_WRITE(PORT_HOTPLUG_EN, 0);
2806         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2807
2808         I915_WRITE(HWSTAM, 0xeffe);
2809         for_each_pipe(pipe)
2810                 I915_WRITE(PIPESTAT(pipe), 0);
2811         I915_WRITE(IMR, 0xffffffff);
2812         I915_WRITE(IER, 0x0);
2813         POSTING_READ(IER);
2814 }
2815
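/*
 * Postinstall for i965/g4x: unmask the interrupt sources we always want
 * (display port, pipe events, render errors, user interrupts), keep the
 * plane flip-pending bits out of IER for now, add the BSD ring user
 * interrupt on G4X, and program EMR so only the error conditions we care
 * about are reported.
 */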
2816 static int i965_irq_postinstall(struct drm_device *dev)
2817 {
2818         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2819         u32 enable_mask;
2820         u32 error_mask;
2821
2822         /* Unmask the interrupts that we always want on. */
2823         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2824                                I915_DISPLAY_PORT_INTERRUPT |
2825                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2826                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2827                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2828                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2829                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2830
2831         enable_mask = ~dev_priv->irq_mask;
2832         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2833                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
2834         enable_mask |= I915_USER_INTERRUPT;
2835
2836         if (IS_G4X(dev))
2837                 enable_mask |= I915_BSD_USER_INTERRUPT;
2838
2839         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2840
2841         /*
2842          * Enable some error detection, note the instruction error mask
2843          * bit is reserved, so we leave it masked.
2844          */
2845         if (IS_G4X(dev)) {
2846                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2847                                GM45_ERROR_MEM_PRIV |
2848                                GM45_ERROR_CP_PRIV |
2849                                I915_ERROR_MEMORY_REFRESH);
2850         } else {
2851                 error_mask = ~(I915_ERROR_PAGE_TABLE |
2852                                I915_ERROR_MEMORY_REFRESH);
2853         }
2854         I915_WRITE(EMR, error_mask);
2855
2856         I915_WRITE(IMR, dev_priv->irq_mask);
2857         I915_WRITE(IER, enable_mask);
2858         POSTING_READ(IER);
2859
2860         I915_WRITE(PORT_HOTPLUG_EN, 0);
2861         POSTING_READ(PORT_HOTPLUG_EN);
2862
2863         intel_opregion_enable_asle(dev);
2864
2865         return 0;
2866 }
2867
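/*
 * (Re)program PORT_HOTPLUG_EN from the per-pin hpd_stats[] state.  Besides
 * the initial setup through dev_priv->display.hpd_irq_setup, this is re-run
 * from the interrupt handlers when hotplug_irq_storm_detect() disables a pin
 * and from the re-enable timer once a storm has passed, which is why it
 * always rebuilds the enable bits from scratch instead of modifying them
 * incrementally.
 */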
2868 static void i915_hpd_irq_setup(struct drm_device *dev)
2869 {
2870         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2871         struct drm_mode_config *mode_config = &dev->mode_config;
2872         struct intel_encoder *intel_encoder;
2873         u32 hotplug_en;
2874
2875         if (I915_HAS_HOTPLUG(dev)) {
2876                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2877                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2878                 /* Note HDMI and DP share hotplug bits */
2879                 /* enable bits are the same for all generations */
2880                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2881                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2882                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
2883                 /* Programming the CRT detection parameters tends to
2884                  * generate a spurious hotplug event about three seconds
2885                  * later.  So just do it once.
2886                  */
2887                 if (IS_G4X(dev))
2888                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2889                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
2890                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2891
2892                 /* Ignore TV since it's buggy */
2893                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2894         }
2895 }
2896
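/*
 * Top-level interrupt handler for i965/g4x.  The structure mirrors the i915
 * handler above: read IIR, snapshot and clear the PIPESTAT registers under
 * irq_lock, consume the port hotplug status, then write IIR back and re-read
 * it so that a bit which became set while we were processing still gets
 * serviced (see the MSI comment at the bottom of the loop).
 */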
2897 static irqreturn_t i965_irq_handler(int irq, void *arg)
2898 {
2899         struct drm_device *dev = (struct drm_device *) arg;
2900         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2901         u32 iir, new_iir;
2902         u32 pipe_stats[I915_MAX_PIPES];
2903         unsigned long irqflags;
2904         int irq_received;
2905         int ret = IRQ_NONE, pipe;
2906         u32 flip_mask =
2907                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2908                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2909
2910         atomic_inc(&dev_priv->irq_received);
2911
2912         iir = I915_READ(IIR);
2913
2914         for (;;) {
2915                 bool blc_event = false;
2916
2917                 irq_received = (iir & ~flip_mask) != 0;
2918
2919                 /* Can't rely on pipestat interrupt bit in iir as it might
2920                  * have been cleared after the pipestat interrupt was received.
2921                  * It doesn't set the bit in iir again, but it still produces
2922                  * interrupts (for non-MSI).
2923                  */
2924                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2925                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2926                         i915_handle_error(dev, false);
2927
2928                 for_each_pipe(pipe) {
2929                         int reg = PIPESTAT(pipe);
2930                         pipe_stats[pipe] = I915_READ(reg);
2931
2932                         /*
2933                          * Clear the PIPE*STAT regs before the IIR
2934                          */
2935                         if (pipe_stats[pipe] & 0x8000ffff) {
2936                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2937                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2938                                                          pipe_name(pipe));
2939                                 I915_WRITE(reg, pipe_stats[pipe]);
2940                                 irq_received = 1;
2941                         }
2942                 }
2943                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2944
2945                 if (!irq_received)
2946                         break;
2947
2948                 ret = IRQ_HANDLED;
2949
2950                 /* Consume port.  Then clear IIR or we'll miss events */
2951                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2952                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2953                         u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2954                                                                   HOTPLUG_INT_STATUS_G4X :
2955                                                                   HOTPLUG_INT_STATUS_I965);
2956
2957                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2958                                   hotplug_status);
2959                         if (hotplug_trigger) {
2960                                 if (hotplug_irq_storm_detect(dev, hotplug_trigger,
2961                                                             IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
2962                                         i915_hpd_irq_setup(dev);
2963                                 queue_work(dev_priv->wq,
2964                                            &dev_priv->hotplug_work);
2965                         }
2966                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2967                         POSTING_READ(PORT_HOTPLUG_STAT);
2968                 }
2969
2970                 I915_WRITE(IIR, iir & ~flip_mask);
2971                 new_iir = I915_READ(IIR); /* Flush posted writes */
2972
2973                 if (iir & I915_USER_INTERRUPT)
2974                         notify_ring(dev, &dev_priv->ring[RCS]);
2975                 if (iir & I915_BSD_USER_INTERRUPT)
2976                         notify_ring(dev, &dev_priv->ring[VCS]);
2977
2978                 for_each_pipe(pipe) {
2979                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2980                             i915_handle_vblank(dev, pipe, pipe, iir))
2981                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2982
2983                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2984                                 blc_event = true;
2985                 }
2986
2988                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2989                         intel_opregion_asle_intr(dev);
2990
2991                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2992                         gmbus_irq_handler(dev);
2993
2994                 /* With MSI, interrupts are only generated when iir
2995                  * transitions from zero to nonzero.  If another bit got
2996                  * set while we were handling the existing iir bits, then
2997                  * we would never get another interrupt.
2998                  *
2999                  * This is fine on non-MSI as well, as if we hit this path
3000                  * we avoid exiting the interrupt handler only to generate
3001                  * another one.
3002                  *
3003                  * Note that for MSI this could cause a stray interrupt report
3004                  * if an interrupt landed in the time between writing IIR and
3005                  * the posting read.  This should be rare enough to never
3006                  * trigger the 99% of 100,000 interrupts test for disabling
3007                  * stray interrupts.
3008                  */
3009                 iir = new_iir;
3010         }
3011
3012         i915_update_dri1_breadcrumb(dev);
3013
3014         return ret;
3015 }
3016
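/*
 * Teardown for the i965/g4x path; like i915_irq_uninstall above, except it
 * also copes with a NULL dev_priv and only acks the sticky status bits
 * (0x8000ffff) when clearing PIPESTAT.
 */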
3017 static void i965_irq_uninstall(struct drm_device *dev)
3018 {
3019         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3020         int pipe;
3021
3022         if (!dev_priv)
3023                 return;
3024
3025         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3026
3027         I915_WRITE(PORT_HOTPLUG_EN, 0);
3028         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3029
3030         I915_WRITE(HWSTAM, 0xffffffff);
3031         for_each_pipe(pipe)
3032                 I915_WRITE(PIPESTAT(pipe), 0);
3033         I915_WRITE(IMR, 0xffffffff);
3034         I915_WRITE(IER, 0x0);
3035
3036         for_each_pipe(pipe)
3037                 I915_WRITE(PIPESTAT(pipe),
3038                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3039         I915_WRITE(IIR, I915_READ(IIR));
3040 }
3041
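/*
 * Timer callback used to bring back HPD pins that the storm detection parked
 * in HPD_DISABLED: mark them enabled again, restore the connectors' polling
 * mode, and reprogram the hotplug hardware through the platform's
 * hpd_irq_setup hook.  Takes irq_lock because hpd_stats[] is also updated
 * from the interrupt handlers.
 */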
3042 static void i915_reenable_hotplug_timer_func(unsigned long data)
3043 {
3044         drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3045         struct drm_device *dev = dev_priv->dev;
3046         struct drm_mode_config *mode_config = &dev->mode_config;
3047         unsigned long irqflags;
3048         int i;
3049
3050         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3051         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3052                 struct drm_connector *connector;
3053
3054                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3055                         continue;
3056
3057                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3058
3059                 list_for_each_entry(connector, &mode_config->connector_list, head) {
3060                         struct intel_connector *intel_connector = to_intel_connector(connector);
3061
3062                         if (intel_connector->encoder->hpd_pin == i) {
3063                                 if (connector->polled != intel_connector->polled)
3064                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3065                                                          drm_get_connector_name(connector));
3066                                 connector->polled = intel_connector->polled;
3067                                 if (!connector->polled)
3068                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3069                         }
3070                 }
3071         }
3072         if (dev_priv->display.hpd_irq_setup)
3073                 dev_priv->display.hpd_irq_setup(dev);
3074         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3075 }
3076
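/*
 * Called once at driver load, before the interrupt is installed: set up the
 * work items and timers used by the interrupt code, register the PM QoS
 * request, and select the per-generation irq and vblank hooks that
 * drm_irq_install() and the vblank code will use.
 */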
3077 void intel_irq_init(struct drm_device *dev)
3078 {
3079         struct drm_i915_private *dev_priv = dev->dev_private;
3080
3081         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3082         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3083         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3084         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3085
3086         setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3087                     i915_hangcheck_elapsed,
3088                     (unsigned long) dev);
3089         setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3090                     (unsigned long) dev_priv);
3091
3092         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3093
3094         dev->driver->get_vblank_counter = i915_get_vblank_counter;
3095         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3096         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3097                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3098                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3099         }
3100
3101         if (drm_core_check_feature(dev, DRIVER_MODESET))
3102                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3103         else
3104                 dev->driver->get_vblank_timestamp = NULL;
3105         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3106
3107         if (IS_VALLEYVIEW(dev)) {
3108                 dev->driver->irq_handler = valleyview_irq_handler;
3109                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3110                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3111                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3112                 dev->driver->enable_vblank = valleyview_enable_vblank;
3113                 dev->driver->disable_vblank = valleyview_disable_vblank;
3114                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3115         } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3116                 /* Share pre & uninstall handlers with ILK/SNB */
3117                 dev->driver->irq_handler = ivybridge_irq_handler;
3118                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3119                 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3120                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3121                 dev->driver->enable_vblank = ivybridge_enable_vblank;
3122                 dev->driver->disable_vblank = ivybridge_disable_vblank;
3123                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3124         } else if (HAS_PCH_SPLIT(dev)) {
3125                 dev->driver->irq_handler = ironlake_irq_handler;
3126                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3127                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3128                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3129                 dev->driver->enable_vblank = ironlake_enable_vblank;
3130                 dev->driver->disable_vblank = ironlake_disable_vblank;
3131                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3132         } else {
3133                 if (INTEL_INFO(dev)->gen == 2) {
3134                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
3135                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
3136                         dev->driver->irq_handler = i8xx_irq_handler;
3137                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
3138                 } else if (INTEL_INFO(dev)->gen == 3) {
3139                         dev->driver->irq_preinstall = i915_irq_preinstall;
3140                         dev->driver->irq_postinstall = i915_irq_postinstall;
3141                         dev->driver->irq_uninstall = i915_irq_uninstall;
3142                         dev->driver->irq_handler = i915_irq_handler;
3143                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3144                 } else {
3145                         dev->driver->irq_preinstall = i965_irq_preinstall;
3146                         dev->driver->irq_postinstall = i965_irq_postinstall;
3147                         dev->driver->irq_uninstall = i965_irq_uninstall;
3148                         dev->driver->irq_handler = i965_irq_handler;
3149                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3150                 }
3151                 dev->driver->enable_vblank = i915_enable_vblank;
3152                 dev->driver->disable_vblank = i915_disable_vblank;
3153         }
3154 }
3155
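/*
 * Reset the hotplug state: zero the storm counters, mark every pin enabled,
 * let connectors with a working HPD pin rely on the interrupt instead of
 * polling, and program the hardware via the platform hpd_irq_setup hook.
 * Meant to run once interrupt support is up (driver load and the resume
 * paths).
 */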
3156 void intel_hpd_init(struct drm_device *dev)
3157 {
3158         struct drm_i915_private *dev_priv = dev->dev_private;
3159         struct drm_mode_config *mode_config = &dev->mode_config;
3160         struct drm_connector *connector;
3161         int i;
3162
3163         for (i = 1; i < HPD_NUM_PINS; i++) {
3164                 dev_priv->hpd_stats[i].hpd_cnt = 0;
3165                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3166         }
3167         list_for_each_entry(connector, &mode_config->connector_list, head) {
3168                 struct intel_connector *intel_connector = to_intel_connector(connector);
3169                 connector->polled = intel_connector->polled;
3170                 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3171                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3172         }
3173         if (dev_priv->display.hpd_irq_setup)
3174                 dev_priv->display.hpd_irq_setup(dev);
3175 }
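
/*
 * A rough sketch of how the pieces in this file fit together at driver load.
 * The real call sites live outside this file (e.g. in i915_dma.c), so treat
 * the exact placement below as illustrative rather than authoritative:
 *
 *	intel_irq_init(dev);	// pick per-gen hooks, init work items/timers
 *	drm_irq_install(dev);	// DRM core: irq_preinstall -> request_irq
 *				//           -> irq_postinstall
 *	intel_hpd_init(dev);	// enable hotplug pins and connector polling
 *
 * From then on dev->driver->irq_handler (e.g. i965_irq_handler) runs for
 * every interrupt, and the hotplug work item, storm detection and re-enable
 * timer above take care of the rest.
 */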