drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include "i915_drv.h"
32 #include <drm/i915_drm.h>
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35
36 bool
37 intel_ring_initialized(struct intel_engine_cs *ring)
38 {
39         struct drm_device *dev = ring->dev;
40
41         if (!dev)
42                 return false;
43
44         if (i915.enable_execlists) {
45                 struct intel_context *dctx = ring->default_context;
46                 struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
47
48                 return ringbuf->obj;
49         } else
50                 return ring->buffer && ring->buffer->obj;
51 }
52
53 int __intel_ring_space(int head, int tail, int size)
54 {
55         int space = head - (tail + I915_RING_FREE_SPACE);
56         if (space < 0)
57                 space += size;
58         return space;
59 }
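/*
 * Worked example (illustrative, assuming I915_RING_FREE_SPACE is 64):
 * with a 4096-byte ring, head = 256 and tail = 3840, the subtraction
 * above gives 256 - (3840 + 64) = -3648, so one ring size is added back
 * and 448 bytes remain usable.  The reserved gap keeps tail from ever
 * catching up to head, so a full ring is never mistaken for an empty one.
 */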
60
61 int intel_ring_space(struct intel_ringbuffer *ringbuf)
62 {
63         return __intel_ring_space(ringbuf->head & HEAD_ADDR,
64                                   ringbuf->tail, ringbuf->size);
65 }
66
67 bool intel_ring_stopped(struct intel_engine_cs *ring)
68 {
69         struct drm_i915_private *dev_priv = ring->dev->dev_private;
70         return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
71 }
72
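/*
 * Illustrative note: the tail wrap below uses "& (size - 1)" rather than
 * a modulo because ring sizes here are powers of two, and the MMIO tail
 * write is skipped while the ring is marked as stopped (presumably via
 * the i915_ring_stop debug facility) so the hardware never starts
 * executing the newly emitted commands.
 */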
73 void __intel_ring_advance(struct intel_engine_cs *ring)
74 {
75         struct intel_ringbuffer *ringbuf = ring->buffer;
76         ringbuf->tail &= ringbuf->size - 1;
77         if (intel_ring_stopped(ring))
78                 return;
79         ring->write_tail(ring, ringbuf->tail);
80 }
81
82 static int
83 gen2_render_ring_flush(struct intel_engine_cs *ring,
84                        u32      invalidate_domains,
85                        u32      flush_domains)
86 {
87         u32 cmd;
88         int ret;
89
90         cmd = MI_FLUSH;
91         if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
92                 cmd |= MI_NO_WRITE_FLUSH;
93
94         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
95                 cmd |= MI_READ_FLUSH;
96
97         ret = intel_ring_begin(ring, 2);
98         if (ret)
99                 return ret;
100
101         intel_ring_emit(ring, cmd);
102         intel_ring_emit(ring, MI_NOOP);
103         intel_ring_advance(ring);
104
105         return 0;
106 }
107
108 static int
109 gen4_render_ring_flush(struct intel_engine_cs *ring,
110                        u32      invalidate_domains,
111                        u32      flush_domains)
112 {
113         struct drm_device *dev = ring->dev;
114         u32 cmd;
115         int ret;
116
117         /*
118          * read/write caches:
119          *
120          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
121          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
122          * also flushed at 2d versus 3d pipeline switches.
123          *
124          * read-only caches:
125          *
126          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
127          * MI_READ_FLUSH is set, and is always flushed on 965.
128          *
129          * I915_GEM_DOMAIN_COMMAND may not exist?
130          *
131          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
132          * invalidated when MI_EXE_FLUSH is set.
133          *
134          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
135          * invalidated with every MI_FLUSH.
136          *
137          * TLBs:
138          *
139          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
140          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
141          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
142          * are flushed at any MI_FLUSH.
143          */
144
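        /*
         * Example (illustrative): a pure write flush, i.e. flush_domains
         * containing I915_GEM_DOMAIN_RENDER and no invalidate bits, ends
         * up as a plain MI_FLUSH with write flushing enabled, while an
         * invalidate of I915_GEM_DOMAIN_INSTRUCTION additionally sets
         * MI_EXE_FLUSH.
         */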
145         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
146         if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
147                 cmd &= ~MI_NO_WRITE_FLUSH;
148         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
149                 cmd |= MI_EXE_FLUSH;
150
151         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
152             (IS_G4X(dev) || IS_GEN5(dev)))
153                 cmd |= MI_INVALIDATE_ISP;
154
155         ret = intel_ring_begin(ring, 2);
156         if (ret)
157                 return ret;
158
159         intel_ring_emit(ring, cmd);
160         intel_ring_emit(ring, MI_NOOP);
161         intel_ring_advance(ring);
162
163         return 0;
164 }
165
166 /**
167  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
168  * implementing two workarounds on gen6.  From section 1.4.7.1
169  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
170  *
171  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
172  * produced by non-pipelined state commands), software needs to first
173  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
174  * 0.
175  *
176  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
177  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
178  *
179  * And the workaround for these two requires this workaround first:
180  *
181  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
182  * BEFORE the pipe-control with a post-sync op and no write-cache
183  * flushes.
184  *
185  * And this last workaround is tricky because of the requirements on
186  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
187  * volume 2 part 1:
188  *
189  *     "1 of the following must also be set:
190  *      - Render Target Cache Flush Enable ([12] of DW1)
191  *      - Depth Cache Flush Enable ([0] of DW1)
192  *      - Stall at Pixel Scoreboard ([1] of DW1)
193  *      - Depth Stall ([13] of DW1)
194  *      - Post-Sync Operation ([13] of DW1)
195  *      - Notify Enable ([8] of DW1)"
196  *
197  * The cache flushes require the workaround flush that triggered this
198  * one, so we can't use it.  Depth stall would trigger the same.
199  * Post-sync nonzero is what triggered this second workaround, so we
200  * can't use that one either.  Notify enable is IRQs, which aren't
201  * really our business.  That leaves only stall at scoreboard.
202  */
203 static int
204 intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
205 {
206         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
207         int ret;
208
209
210         ret = intel_ring_begin(ring, 6);
211         if (ret)
212                 return ret;
213
214         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
215         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
216                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
217         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
218         intel_ring_emit(ring, 0); /* low dword */
219         intel_ring_emit(ring, 0); /* high dword */
220         intel_ring_emit(ring, MI_NOOP);
221         intel_ring_advance(ring);
222
223         ret = intel_ring_begin(ring, 6);
224         if (ret)
225                 return ret;
226
227         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
228         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
229         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
230         intel_ring_emit(ring, 0);
231         intel_ring_emit(ring, 0);
232         intel_ring_emit(ring, MI_NOOP);
233         intel_ring_advance(ring);
234
235         return 0;
236 }
237
238 static int
239 gen6_render_ring_flush(struct intel_engine_cs *ring,
240                          u32 invalidate_domains, u32 flush_domains)
241 {
242         u32 flags = 0;
243         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
244         int ret;
245
246         /* Force SNB workarounds for PIPE_CONTROL flushes */
247         ret = intel_emit_post_sync_nonzero_flush(ring);
248         if (ret)
249                 return ret;
250
251         /* Just flush everything.  Experiments have shown that reducing the
252          * number of bits based on the write domains has little performance
253          * impact.
254          */
255         if (flush_domains) {
256                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
257                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
258                 /*
259                  * Ensure that any following seqno writes only happen
260                  * when the render cache is indeed flushed.
261                  */
262                 flags |= PIPE_CONTROL_CS_STALL;
263         }
264         if (invalidate_domains) {
265                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
266                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
267                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
268                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
269                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
270                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
271                 /*
272                  * TLB invalidate requires a post-sync write.
273                  */
274                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
275         }
276
277         ret = intel_ring_begin(ring, 4);
278         if (ret)
279                 return ret;
280
281         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
282         intel_ring_emit(ring, flags);
283         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
284         intel_ring_emit(ring, 0);
285         intel_ring_advance(ring);
286
287         return 0;
288 }
289
290 static int
291 gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
292 {
293         int ret;
294
295         ret = intel_ring_begin(ring, 4);
296         if (ret)
297                 return ret;
298
299         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
300         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
301                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
302         intel_ring_emit(ring, 0);
303         intel_ring_emit(ring, 0);
304         intel_ring_advance(ring);
305
306         return 0;
307 }
308
309 static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
310 {
311         int ret;
312
313         if (!ring->fbc_dirty)
314                 return 0;
315
316         ret = intel_ring_begin(ring, 6);
317         if (ret)
318                 return ret;
319         /* WaFbcNukeOn3DBlt:ivb/hsw */
320         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
321         intel_ring_emit(ring, MSG_FBC_REND_STATE);
322         intel_ring_emit(ring, value);
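        /*
         * The MI_STORE_REGISTER_MEM below reads MSG_FBC_REND_STATE back
         * into the scratch page; presumably this serves as a posting read
         * so the register write above has landed before the ring moves on
         * (an inference from the command sequence, not a documented fact).
         */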
323         intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
324         intel_ring_emit(ring, MSG_FBC_REND_STATE);
325         intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
326         intel_ring_advance(ring);
327
328         ring->fbc_dirty = false;
329         return 0;
330 }
331
332 static int
333 gen7_render_ring_flush(struct intel_engine_cs *ring,
334                        u32 invalidate_domains, u32 flush_domains)
335 {
336         u32 flags = 0;
337         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
338         int ret;
339
340         /*
341          * Ensure that any following seqno writes only happen when the render
342          * cache is indeed flushed.
343          *
344          * Workaround: 4th PIPE_CONTROL command (except the ones with only
345          * read-cache invalidate bits set) must have the CS_STALL bit set. We
346          * don't try to be clever and just set it unconditionally.
347          */
348         flags |= PIPE_CONTROL_CS_STALL;
349
350         /* Just flush everything.  Experiments have shown that reducing the
351          * number of bits based on the write domains has little performance
352          * impact.
353          */
354         if (flush_domains) {
355                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
356                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
357         }
358         if (invalidate_domains) {
359                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
360                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
361                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
362                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
363                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
364                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
365                 /*
366                  * TLB invalidate requires a post-sync write.
367                  */
368                 flags |= PIPE_CONTROL_QW_WRITE;
369                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
370
371                 /* Workaround: we must issue a pipe_control with CS-stall bit
372                  * set before a pipe_control command that has the state cache
373                  * invalidate bit set. */
374                 gen7_render_ring_cs_stall_wa(ring);
375         }
376
377         ret = intel_ring_begin(ring, 4);
378         if (ret)
379                 return ret;
380
381         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
382         intel_ring_emit(ring, flags);
383         intel_ring_emit(ring, scratch_addr);
384         intel_ring_emit(ring, 0);
385         intel_ring_advance(ring);
386
387         if (!invalidate_domains && flush_domains)
388                 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
389
390         return 0;
391 }
392
393 static int
394 gen8_emit_pipe_control(struct intel_engine_cs *ring,
395                        u32 flags, u32 scratch_addr)
396 {
397         int ret;
398
399         ret = intel_ring_begin(ring, 6);
400         if (ret)
401                 return ret;
402
403         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
404         intel_ring_emit(ring, flags);
405         intel_ring_emit(ring, scratch_addr);
406         intel_ring_emit(ring, 0);
407         intel_ring_emit(ring, 0);
408         intel_ring_emit(ring, 0);
409         intel_ring_advance(ring);
410
411         return 0;
412 }
413
414 static int
415 gen8_render_ring_flush(struct intel_engine_cs *ring,
416                        u32 invalidate_domains, u32 flush_domains)
417 {
418         u32 flags = 0;
419         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
420         int ret;
421
422         flags |= PIPE_CONTROL_CS_STALL;
423
424         if (flush_domains) {
425                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
426                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
427         }
428         if (invalidate_domains) {
429                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
430                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
431                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
432                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
433                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
434                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
435                 flags |= PIPE_CONTROL_QW_WRITE;
436                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
437
438                 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
439                 ret = gen8_emit_pipe_control(ring,
440                                              PIPE_CONTROL_CS_STALL |
441                                              PIPE_CONTROL_STALL_AT_SCOREBOARD,
442                                              0);
443                 if (ret)
444                         return ret;
445         }
446
447         ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
448         if (ret)
449                 return ret;
450
451         if (!invalidate_domains && flush_domains)
452                 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
453
454         return 0;
455 }
456
457 static void ring_write_tail(struct intel_engine_cs *ring,
458                             u32 value)
459 {
460         struct drm_i915_private *dev_priv = ring->dev->dev_private;
461         I915_WRITE_TAIL(ring, value);
462 }
463
464 u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
465 {
466         struct drm_i915_private *dev_priv = ring->dev->dev_private;
467         u64 acthd;
468
469         if (INTEL_INFO(ring->dev)->gen >= 8)
470                 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
471                                          RING_ACTHD_UDW(ring->mmio_base));
472         else if (INTEL_INFO(ring->dev)->gen >= 4)
473                 acthd = I915_READ(RING_ACTHD(ring->mmio_base));
474         else
475                 acthd = I915_READ(ACTHD);
476
477         return acthd;
478 }
479
480 static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
481 {
482         struct drm_i915_private *dev_priv = ring->dev->dev_private;
483         u32 addr;
484
485         addr = dev_priv->status_page_dmah->busaddr;
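        /*
         * On gen4+ the upper physical address bits 35:32 appear to be
         * carried in HWS_PGA bits 7:4, which is what the shift by 28 and
         * the 0xf0 mask below produce (inferred from the bit arithmetic
         * rather than from the register documentation).
         */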
486         if (INTEL_INFO(ring->dev)->gen >= 4)
487                 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
488         I915_WRITE(HWS_PGA, addr);
489 }
490
491 static bool stop_ring(struct intel_engine_cs *ring)
492 {
493         struct drm_i915_private *dev_priv = to_i915(ring->dev);
494
495         if (!IS_GEN2(ring->dev)) {
496                 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
497                 if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
498                         DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
499                         /* Sometimes we observe that the idle flag is not
500                          * set even though the ring is empty. So double
501                          * check before giving up.
502                          */
503                         if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
504                                 return false;
505                 }
506         }
507
508         I915_WRITE_CTL(ring, 0);
509         I915_WRITE_HEAD(ring, 0);
510         ring->write_tail(ring, 0);
511
512         if (!IS_GEN2(ring->dev)) {
513                 (void)I915_READ_CTL(ring);
514                 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
515         }
516
517         return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
518 }
519
520 static int init_ring_common(struct intel_engine_cs *ring)
521 {
522         struct drm_device *dev = ring->dev;
523         struct drm_i915_private *dev_priv = dev->dev_private;
524         struct intel_ringbuffer *ringbuf = ring->buffer;
525         struct drm_i915_gem_object *obj = ringbuf->obj;
526         int ret = 0;
527
528         gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
529
530         if (!stop_ring(ring)) {
531                 /* G45 ring initialization often fails to reset head to zero */
532                 DRM_DEBUG_KMS("%s head not reset to zero "
533                               "ctl %08x head %08x tail %08x start %08x\n",
534                               ring->name,
535                               I915_READ_CTL(ring),
536                               I915_READ_HEAD(ring),
537                               I915_READ_TAIL(ring),
538                               I915_READ_START(ring));
539
540                 if (!stop_ring(ring)) {
541                         DRM_ERROR("failed to set %s head to zero "
542                                   "ctl %08x head %08x tail %08x start %08x\n",
543                                   ring->name,
544                                   I915_READ_CTL(ring),
545                                   I915_READ_HEAD(ring),
546                                   I915_READ_TAIL(ring),
547                                   I915_READ_START(ring));
548                         ret = -EIO;
549                         goto out;
550                 }
551         }
552
553         if (I915_NEED_GFX_HWS(dev))
554                 intel_ring_setup_status_page(ring);
555         else
556                 ring_setup_phys_status_page(ring);
557
558         /* Enforce ordering by reading HEAD register back */
559         I915_READ_HEAD(ring);
560
561         /* Initialize the ring. This must happen _after_ we've cleared the ring
562          * registers with the above sequence (the readback of the HEAD registers
563          * also enforces ordering), otherwise the hw might lose the new ring
564          * register values. */
565         I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
566
567         /* WaClearRingBufHeadRegAtInit:ctg,elk */
568         if (I915_READ_HEAD(ring))
569                 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
570                           ring->name, I915_READ_HEAD(ring));
571         I915_WRITE_HEAD(ring, 0);
572         (void)I915_READ_HEAD(ring);
573
574         I915_WRITE_CTL(ring,
575                         ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
576                         | RING_VALID);
577
578         /* If the head is still not zero, the ring is dead */
579         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
580                      I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
581                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
582                 DRM_ERROR("%s initialization failed "
583                           "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
584                           ring->name,
585                           I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
586                           I915_READ_HEAD(ring), I915_READ_TAIL(ring),
587                           I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
588                 ret = -EIO;
589                 goto out;
590         }
591
592         ringbuf->head = I915_READ_HEAD(ring);
593         ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
594         ringbuf->space = intel_ring_space(ringbuf);
595         ringbuf->last_retired_head = -1;
596
597         memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
598
599 out:
600         gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
601
602         return ret;
603 }
604
605 void
606 intel_fini_pipe_control(struct intel_engine_cs *ring)
607 {
608         struct drm_device *dev = ring->dev;
609
610         if (ring->scratch.obj == NULL)
611                 return;
612
613         if (INTEL_INFO(dev)->gen >= 5) {
614                 kunmap(sg_page(ring->scratch.obj->pages->sgl));
615                 i915_gem_object_ggtt_unpin(ring->scratch.obj);
616         }
617
618         drm_gem_object_unreference(&ring->scratch.obj->base);
619         ring->scratch.obj = NULL;
620 }
621
622 int
623 intel_init_pipe_control(struct intel_engine_cs *ring)
624 {
625         int ret;
626
627         if (ring->scratch.obj)
628                 return 0;
629
630         ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
631         if (ring->scratch.obj == NULL) {
632                 DRM_ERROR("Failed to allocate seqno page\n");
633                 ret = -ENOMEM;
634                 goto err;
635         }
636
637         ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
638         if (ret)
639                 goto err_unref;
640
641         ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
642         if (ret)
643                 goto err_unref;
644
645         ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
646         ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
647         if (ring->scratch.cpu_page == NULL) {
648                 ret = -ENOMEM;
649                 goto err_unpin;
650         }
651
652         DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
653                          ring->name, ring->scratch.gtt_offset);
654         return 0;
655
656 err_unpin:
657         i915_gem_object_ggtt_unpin(ring->scratch.obj);
658 err_unref:
659         drm_gem_object_unreference(&ring->scratch.obj->base);
660 err:
661         return ret;
662 }
663
664 static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
665                                        struct intel_context *ctx)
666 {
667         int ret, i;
668         struct drm_device *dev = ring->dev;
669         struct drm_i915_private *dev_priv = dev->dev_private;
670         struct i915_workarounds *w = &dev_priv->workarounds;
671
672         if (WARN_ON(w->count == 0))
673                 return 0;
674
675         ring->gpu_caches_dirty = true;
676         ret = intel_ring_flush_all_caches(ring);
677         if (ret)
678                 return ret;
679
680         ret = intel_ring_begin(ring, (w->count * 2 + 2));
681         if (ret)
682                 return ret;
683
684         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
685         for (i = 0; i < w->count; i++) {
686                 intel_ring_emit(ring, w->reg[i].addr);
687                 intel_ring_emit(ring, w->reg[i].value);
688         }
689         intel_ring_emit(ring, MI_NOOP);
690
691         intel_ring_advance(ring);
692
693         ring->gpu_caches_dirty = true;
694         ret = intel_ring_flush_all_caches(ring);
695         if (ret)
696                 return ret;
697
698         DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
699
700         return 0;
701 }
702
703 static int wa_add(struct drm_i915_private *dev_priv,
704                   const u32 addr, const u32 val, const u32 mask)
705 {
706         const u32 idx = dev_priv->workarounds.count;
707
708         if (WARN_ON(idx >= I915_MAX_WA_REGS))
709                 return -ENOSPC;
710
711         dev_priv->workarounds.reg[idx].addr = addr;
712         dev_priv->workarounds.reg[idx].value = val;
713         dev_priv->workarounds.reg[idx].mask = mask;
714
715         dev_priv->workarounds.count++;
716
717         return 0;
718 }
719
720 #define WA_REG(addr, val, mask) { \
721                 const int r = wa_add(dev_priv, (addr), (val), (mask)); \
722                 if (r) \
723                         return r; \
724         }
725
726 #define WA_SET_BIT_MASKED(addr, mask) \
727         WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff)
728
729 #define WA_CLR_BIT_MASKED(addr, mask) \
730         WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff)
731
732 #define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask)
733 #define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask)
734
735 #define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff)
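/*
 * Usage sketch (illustrative): WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
 * STALL_DOP_GATING_DISABLE) records one entry whose value is
 * _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE), i.e. the bit itself plus
 * the matching write-enable bit in the upper 16 bits, and whose mask is
 * the low-word bit; intel_ring_workarounds_emit() later replays every
 * recorded entry through MI_LOAD_REGISTER_IMM.
 */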
736
737 static int bdw_init_workarounds(struct intel_engine_cs *ring)
738 {
739         struct drm_device *dev = ring->dev;
740         struct drm_i915_private *dev_priv = dev->dev_private;
741
742         /* WaDisablePartialInstShootdown:bdw */
743         /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
744         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
745                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
746                           STALL_DOP_GATING_DISABLE);
747
748         /* WaDisableDopClockGating:bdw */
749         WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
750                           DOP_CLOCK_GATING_DISABLE);
751
752         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
753                           GEN8_SAMPLER_POWER_BYPASS_DIS);
754
755         /* Use Force Non-Coherent whenever executing a 3D context. This is a
756          * workaround for a possible hang in the unlikely event a TLB
757          * invalidation occurs during a PSD flush.
758          */
759         /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
760         WA_SET_BIT_MASKED(HDC_CHICKEN0,
761                           HDC_FORCE_NON_COHERENT |
762                           (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
763
764         /* Wa4x4STCOptimizationDisable:bdw */
765         WA_SET_BIT_MASKED(CACHE_MODE_1,
766                           GEN8_4x4_STC_OPTIMIZATION_DISABLE);
767
768         /*
769          * BSpec recommends 8x4 when MSAA is used,
770          * however in practice 16x4 seems fastest.
771          *
772          * Note that PS/WM thread counts depend on the WIZ hashing
773          * disable bit, which we don't touch here, but it's good
774          * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
775          */
776         WA_SET_BIT_MASKED(GEN7_GT_MODE,
777                           GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
778
779         return 0;
780 }
781
782 static int chv_init_workarounds(struct intel_engine_cs *ring)
783 {
784         struct drm_device *dev = ring->dev;
785         struct drm_i915_private *dev_priv = dev->dev_private;
786
787         /* WaDisablePartialInstShootdown:chv */
788         /* WaDisableThreadStallDopClockGating:chv */
789         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
790                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
791                           STALL_DOP_GATING_DISABLE);
792
793         /* Use Force Non-Coherent whenever executing a 3D context. This is a
794          * workaround for a possible hang in the unlikely event a TLB
795          * invalidation occurs during a PSD flush.
796          */
797         /* WaForceEnableNonCoherent:chv */
798         /* WaHdcDisableFetchWhenMasked:chv */
799         WA_SET_BIT_MASKED(HDC_CHICKEN0,
800                           HDC_FORCE_NON_COHERENT |
801                           HDC_DONOT_FETCH_MEM_WHEN_MASKED);
802
803         return 0;
804 }
805
806 int init_workarounds_ring(struct intel_engine_cs *ring)
807 {
808         struct drm_device *dev = ring->dev;
809         struct drm_i915_private *dev_priv = dev->dev_private;
810
811         WARN_ON(ring->id != RCS);
812
813         dev_priv->workarounds.count = 0;
814
815         if (IS_BROADWELL(dev))
816                 return bdw_init_workarounds(ring);
817
818         if (IS_CHERRYVIEW(dev))
819                 return chv_init_workarounds(ring);
820
821         return 0;
822 }
823
824 static int init_render_ring(struct intel_engine_cs *ring)
825 {
826         struct drm_device *dev = ring->dev;
827         struct drm_i915_private *dev_priv = dev->dev_private;
828         int ret = init_ring_common(ring);
829         if (ret)
830                 return ret;
831
832         /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
833         if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
834                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
835
836         /* We need to disable the AsyncFlip performance optimisations in order
837          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
838          * programmed to '1' on all products.
839          *
840          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
841          */
842         if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
843                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
844
845         /* Required for the hardware to program scanline values for waiting */
846         /* WaEnableFlushTlbInvalidationMode:snb */
847         if (INTEL_INFO(dev)->gen == 6)
848                 I915_WRITE(GFX_MODE,
849                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
850
851         /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
852         if (IS_GEN7(dev))
853                 I915_WRITE(GFX_MODE_GEN7,
854                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
855                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
856
857         if (INTEL_INFO(dev)->gen >= 5) {
858                 ret = intel_init_pipe_control(ring);
859                 if (ret)
860                         return ret;
861         }
862
863         if (IS_GEN6(dev)) {
864                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
865                  * "If this bit is set, STCunit will have LRA as replacement
866                  *  policy. [...] This bit must be reset.  LRA replacement
867                  *  policy is not supported."
868                  */
869                 I915_WRITE(CACHE_MODE_0,
870                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
871         }
872
873         if (INTEL_INFO(dev)->gen >= 6)
874                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
875
876         if (HAS_L3_DPF(dev))
877                 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
878
879         return init_workarounds_ring(ring);
880 }
881
882 static void render_ring_cleanup(struct intel_engine_cs *ring)
883 {
884         struct drm_device *dev = ring->dev;
885         struct drm_i915_private *dev_priv = dev->dev_private;
886
887         if (dev_priv->semaphore_obj) {
888                 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
889                 drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
890                 dev_priv->semaphore_obj = NULL;
891         }
892
893         intel_fini_pipe_control(ring);
894 }
895
896 static int gen8_rcs_signal(struct intel_engine_cs *signaller,
897                            unsigned int num_dwords)
898 {
899 #define MBOX_UPDATE_DWORDS 8
900         struct drm_device *dev = signaller->dev;
901         struct drm_i915_private *dev_priv = dev->dev_private;
902         struct intel_engine_cs *waiter;
903         int i, ret, num_rings;
904
905         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
906         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
907 #undef MBOX_UPDATE_DWORDS
908
909         ret = intel_ring_begin(signaller, num_dwords);
910         if (ret)
911                 return ret;
912
913         for_each_ring(waiter, dev_priv, i) {
914                 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
915                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
916                         continue;
917
918                 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
919                 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
920                                            PIPE_CONTROL_QW_WRITE |
921                                            PIPE_CONTROL_FLUSH_ENABLE);
922                 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
923                 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
924                 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
925                 intel_ring_emit(signaller, 0);
926                 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
927                                            MI_SEMAPHORE_TARGET(waiter->id));
928                 intel_ring_emit(signaller, 0);
929         }
930
931         return 0;
932 }
933
934 static int gen8_xcs_signal(struct intel_engine_cs *signaller,
935                            unsigned int num_dwords)
936 {
937 #define MBOX_UPDATE_DWORDS 6
938         struct drm_device *dev = signaller->dev;
939         struct drm_i915_private *dev_priv = dev->dev_private;
940         struct intel_engine_cs *waiter;
941         int i, ret, num_rings;
942
943         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
944         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
945 #undef MBOX_UPDATE_DWORDS
946
947         ret = intel_ring_begin(signaller, num_dwords);
948         if (ret)
949                 return ret;
950
951         for_each_ring(waiter, dev_priv, i) {
952                 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
953                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
954                         continue;
955
956                 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
957                                            MI_FLUSH_DW_OP_STOREDW);
958                 intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
959                                            MI_FLUSH_DW_USE_GTT);
960                 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
961                 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
962                 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
963                                            MI_SEMAPHORE_TARGET(waiter->id));
964                 intel_ring_emit(signaller, 0);
965         }
966
967         return 0;
968 }
969
970 static int gen6_signal(struct intel_engine_cs *signaller,
971                        unsigned int num_dwords)
972 {
973         struct drm_device *dev = signaller->dev;
974         struct drm_i915_private *dev_priv = dev->dev_private;
975         struct intel_engine_cs *useless;
976         int i, ret, num_rings;
977
978 #define MBOX_UPDATE_DWORDS 3
979         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
980         num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
981 #undef MBOX_UPDATE_DWORDS
982
983         ret = intel_ring_begin(signaller, num_dwords);
984         if (ret)
985                 return ret;
986
987         for_each_ring(useless, dev_priv, i) {
988                 u32 mbox_reg = signaller->semaphore.mbox.signal[i];
989                 if (mbox_reg != GEN6_NOSYNC) {
990                         intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
991                         intel_ring_emit(signaller, mbox_reg);
992                         intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
993                 }
994         }
995
996         /* If num_dwords was rounded, make sure the tail pointer is correct */
997         if (num_rings % 2 == 0)
998                 intel_ring_emit(signaller, MI_NOOP);
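        /*
         * Dword accounting (illustrative): with four rings the loop above
         * emits 3 * 3 = 9 mailbox dwords while round_up() budgeted 10, so
         * the MI_NOOP fills the spare slot and keeps the tail
         * qword-aligned; with three rings the 6 dwords already line up
         * and no padding is needed.
         */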
999
1000         return 0;
1001 }
1002
1003 /**
1004  * gen6_add_request - Update the semaphore mailbox registers
1005  *
1006  * @ring: ring that is adding a request; the seqno written is the
1007  * ring's outstanding_lazy_seqno
1008  *
1009  * Update the mailbox registers in the *other* rings with the current seqno.
1010  * This acts like a signal in the canonical semaphore.
1011  */
1012 static int
1013 gen6_add_request(struct intel_engine_cs *ring)
1014 {
1015         int ret;
1016
1017         if (ring->semaphore.signal)
1018                 ret = ring->semaphore.signal(ring, 4);
1019         else
1020                 ret = intel_ring_begin(ring, 4);
1021
1022         if (ret)
1023                 return ret;
1024
1025         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1026         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1027         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1028         intel_ring_emit(ring, MI_USER_INTERRUPT);
1029         __intel_ring_advance(ring);
1030
1031         return 0;
1032 }
1033
1034 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
1035                                               u32 seqno)
1036 {
1037         struct drm_i915_private *dev_priv = dev->dev_private;
1038         return dev_priv->last_seqno < seqno;
1039 }
1040
1041 /**
1042  * intel_ring_sync - sync the waiter to the signaller on seqno
1043  *
1044  * @waiter: ring that is waiting
1045  * @signaller: ring which has signalled, or will signal
1046  * @seqno: seqno which the waiter will block on
1047  */
1048
1049 static int
1050 gen8_ring_sync(struct intel_engine_cs *waiter,
1051                struct intel_engine_cs *signaller,
1052                u32 seqno)
1053 {
1054         struct drm_i915_private *dev_priv = waiter->dev->dev_private;
1055         int ret;
1056
1057         ret = intel_ring_begin(waiter, 4);
1058         if (ret)
1059                 return ret;
1060
1061         intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
1062                                 MI_SEMAPHORE_GLOBAL_GTT |
1063                                 MI_SEMAPHORE_POLL |
1064                                 MI_SEMAPHORE_SAD_GTE_SDD);
1065         intel_ring_emit(waiter, seqno);
1066         intel_ring_emit(waiter,
1067                         lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1068         intel_ring_emit(waiter,
1069                         upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1070         intel_ring_advance(waiter);
1071         return 0;
1072 }
1073
1074 static int
1075 gen6_ring_sync(struct intel_engine_cs *waiter,
1076                struct intel_engine_cs *signaller,
1077                u32 seqno)
1078 {
1079         u32 dw1 = MI_SEMAPHORE_MBOX |
1080                   MI_SEMAPHORE_COMPARE |
1081                   MI_SEMAPHORE_REGISTER;
1082         u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
1083         int ret;
1084
1085         /* Throughout all of the GEM code, seqno passed implies our current
1086          * seqno is >= the last seqno executed. However for hardware the
1087          * comparison is strictly greater than.
1088          */
1089         seqno -= 1;
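        /*
         * e.g. to wait until the signaller has completed seqno 100, a
         * compare value of 99 is emitted below: the hardware releases the
         * waiter once the mailbox is strictly greater than 99, i.e. once
         * it is >= 100.
         */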
1090
1091         WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
1092
1093         ret = intel_ring_begin(waiter, 4);
1094         if (ret)
1095                 return ret;
1096
1097         /* If seqno wrap happened, omit the wait with no-ops */
1098         if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
1099                 intel_ring_emit(waiter, dw1 | wait_mbox);
1100                 intel_ring_emit(waiter, seqno);
1101                 intel_ring_emit(waiter, 0);
1102                 intel_ring_emit(waiter, MI_NOOP);
1103         } else {
1104                 intel_ring_emit(waiter, MI_NOOP);
1105                 intel_ring_emit(waiter, MI_NOOP);
1106                 intel_ring_emit(waiter, MI_NOOP);
1107                 intel_ring_emit(waiter, MI_NOOP);
1108         }
1109         intel_ring_advance(waiter);
1110
1111         return 0;
1112 }
1113
1114 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
1115 do {                                                                    \
1116         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
1117                  PIPE_CONTROL_DEPTH_STALL);                             \
1118         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
1119         intel_ring_emit(ring__, 0);                                                     \
1120         intel_ring_emit(ring__, 0);                                                     \
1121 } while (0)
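/*
 * Dword budget (illustrative): each PIPE_CONTROL_FLUSH() above emits 4
 * dwords, and pc_render_add_request() below uses six of them (24 dwords)
 * plus two 4-dword PIPE_CONTROL packets, which is exactly the 32 dwords
 * it reserves with intel_ring_begin().
 */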
1122
1123 static int
1124 pc_render_add_request(struct intel_engine_cs *ring)
1125 {
1126         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1127         int ret;
1128
1129         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
1130          * incoherent with writes to memory, i.e. completely fubar,
1131          * so we need to use PIPE_NOTIFY instead.
1132          *
1133          * However, we also need to work around the qword write
1134          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
1135          * memory before requesting an interrupt.
1136          */
1137         ret = intel_ring_begin(ring, 32);
1138         if (ret)
1139                 return ret;
1140
1141         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
1142                         PIPE_CONTROL_WRITE_FLUSH |
1143                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
1144         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1145         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1146         intel_ring_emit(ring, 0);
1147         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1148         scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
1149         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1150         scratch_addr += 2 * CACHELINE_BYTES;
1151         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1152         scratch_addr += 2 * CACHELINE_BYTES;
1153         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1154         scratch_addr += 2 * CACHELINE_BYTES;
1155         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1156         scratch_addr += 2 * CACHELINE_BYTES;
1157         PIPE_CONTROL_FLUSH(ring, scratch_addr);
1158
1159         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
1160                         PIPE_CONTROL_WRITE_FLUSH |
1161                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
1162                         PIPE_CONTROL_NOTIFY);
1163         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1164         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1165         intel_ring_emit(ring, 0);
1166         __intel_ring_advance(ring);
1167
1168         return 0;
1169 }
1170
1171 static u32
1172 gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1173 {
1174         /* Workaround to force correct ordering between irq and seqno writes on
1175          * ivb (and maybe also on snb) by reading from a CS register (like
1176          * ACTHD) before reading the status page. */
1177         if (!lazy_coherency) {
1178                 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1179                 POSTING_READ(RING_ACTHD(ring->mmio_base));
1180         }
1181
1182         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1183 }
1184
1185 static u32
1186 ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1187 {
1188         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1189 }
1190
1191 static void
1192 ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1193 {
1194         intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1195 }
1196
1197 static u32
1198 pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1199 {
1200         return ring->scratch.cpu_page[0];
1201 }
1202
1203 static void
1204 pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1205 {
1206         ring->scratch.cpu_page[0] = seqno;
1207 }
1208
1209 static bool
1210 gen5_ring_get_irq(struct intel_engine_cs *ring)
1211 {
1212         struct drm_device *dev = ring->dev;
1213         struct drm_i915_private *dev_priv = dev->dev_private;
1214         unsigned long flags;
1215
1216         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1217                 return false;
1218
1219         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1220         if (ring->irq_refcount++ == 0)
1221                 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1222         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1223
1224         return true;
1225 }
1226
1227 static void
1228 gen5_ring_put_irq(struct intel_engine_cs *ring)
1229 {
1230         struct drm_device *dev = ring->dev;
1231         struct drm_i915_private *dev_priv = dev->dev_private;
1232         unsigned long flags;
1233
1234         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1235         if (--ring->irq_refcount == 0)
1236                 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1237         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1238 }
1239
1240 static bool
1241 i9xx_ring_get_irq(struct intel_engine_cs *ring)
1242 {
1243         struct drm_device *dev = ring->dev;
1244         struct drm_i915_private *dev_priv = dev->dev_private;
1245         unsigned long flags;
1246
1247         if (!intel_irqs_enabled(dev_priv))
1248                 return false;
1249
1250         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1251         if (ring->irq_refcount++ == 0) {
1252                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
1253                 I915_WRITE(IMR, dev_priv->irq_mask);
1254                 POSTING_READ(IMR);
1255         }
1256         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1257
1258         return true;
1259 }
1260
1261 static void
1262 i9xx_ring_put_irq(struct intel_engine_cs *ring)
1263 {
1264         struct drm_device *dev = ring->dev;
1265         struct drm_i915_private *dev_priv = dev->dev_private;
1266         unsigned long flags;
1267
1268         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1269         if (--ring->irq_refcount == 0) {
1270                 dev_priv->irq_mask |= ring->irq_enable_mask;
1271                 I915_WRITE(IMR, dev_priv->irq_mask);
1272                 POSTING_READ(IMR);
1273         }
1274         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1275 }
1276
1277 static bool
1278 i8xx_ring_get_irq(struct intel_engine_cs *ring)
1279 {
1280         struct drm_device *dev = ring->dev;
1281         struct drm_i915_private *dev_priv = dev->dev_private;
1282         unsigned long flags;
1283
1284         if (!intel_irqs_enabled(dev_priv))
1285                 return false;
1286
1287         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1288         if (ring->irq_refcount++ == 0) {
1289                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
1290                 I915_WRITE16(IMR, dev_priv->irq_mask);
1291                 POSTING_READ16(IMR);
1292         }
1293         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1294
1295         return true;
1296 }
1297
1298 static void
1299 i8xx_ring_put_irq(struct intel_engine_cs *ring)
1300 {
1301         struct drm_device *dev = ring->dev;
1302         struct drm_i915_private *dev_priv = dev->dev_private;
1303         unsigned long flags;
1304
1305         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1306         if (--ring->irq_refcount == 0) {
1307                 dev_priv->irq_mask |= ring->irq_enable_mask;
1308                 I915_WRITE16(IMR, dev_priv->irq_mask);
1309                 POSTING_READ16(IMR);
1310         }
1311         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1312 }
1313
1314 void intel_ring_setup_status_page(struct intel_engine_cs *ring)
1315 {
1316         struct drm_device *dev = ring->dev;
1317         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1318         u32 mmio = 0;
1319
1320         /* The ring status page addresses are no longer next to the rest of
1321          * the ring registers as of gen7.
1322          */
1323         if (IS_GEN7(dev)) {
1324                 switch (ring->id) {
1325                 case RCS:
1326                         mmio = RENDER_HWS_PGA_GEN7;
1327                         break;
1328                 case BCS:
1329                         mmio = BLT_HWS_PGA_GEN7;
1330                         break;
1331                 /*
1332                  * VCS2 doesn't actually exist on Gen7; this case only
1333                  * silences the gcc switch check warning.
1334                  */
1335                 case VCS2:
1336                 case VCS:
1337                         mmio = BSD_HWS_PGA_GEN7;
1338                         break;
1339                 case VECS:
1340                         mmio = VEBOX_HWS_PGA_GEN7;
1341                         break;
1342                 }
1343         } else if (IS_GEN6(ring->dev)) {
1344                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
1345         } else {
1346                 /* XXX: gen8 returns to sanity */
1347                 mmio = RING_HWS_PGA(ring->mmio_base);
1348         }
1349
1350         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
1351         POSTING_READ(mmio);
1352
1353         /*
1354          * Flush the TLB for this page
1355          *
1356          * FIXME: These two bits have disappeared on gen8, so a question
1357          * arises: do we still need this and if so how should we go about
1358          * invalidating the TLB?
1359          */
1360         if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
1361                 u32 reg = RING_INSTPM(ring->mmio_base);
1362
1363                 /* ring should be idle before issuing a sync flush */
1364                 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1365
1366                 I915_WRITE(reg,
1367                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
1368                                               INSTPM_SYNC_FLUSH));
1369                 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1370                              1000))
1371                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
1372                                   ring->name);
1373         }
1374 }
1375
1376 static int
1377 bsd_ring_flush(struct intel_engine_cs *ring,
1378                u32     invalidate_domains,
1379                u32     flush_domains)
1380 {
1381         int ret;
1382
1383         ret = intel_ring_begin(ring, 2);
1384         if (ret)
1385                 return ret;
1386
1387         intel_ring_emit(ring, MI_FLUSH);
1388         intel_ring_emit(ring, MI_NOOP);
1389         intel_ring_advance(ring);
1390         return 0;
1391 }
1392
1393 static int
1394 i9xx_add_request(struct intel_engine_cs *ring)
1395 {
1396         int ret;
1397
1398         ret = intel_ring_begin(ring, 4);
1399         if (ret)
1400                 return ret;
1401
1402         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1403         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1404         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1405         intel_ring_emit(ring, MI_USER_INTERRUPT);
1406         __intel_ring_advance(ring);
1407
1408         return 0;
1409 }
1410
1411 static bool
1412 gen6_ring_get_irq(struct intel_engine_cs *ring)
1413 {
1414         struct drm_device *dev = ring->dev;
1415         struct drm_i915_private *dev_priv = dev->dev_private;
1416         unsigned long flags;
1417
1418         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1419                 return false;
1420
1421         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1422         if (ring->irq_refcount++ == 0) {
1423                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1424                         I915_WRITE_IMR(ring,
1425                                        ~(ring->irq_enable_mask |
1426                                          GT_PARITY_ERROR(dev)));
1427                 else
1428                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1429                 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1430         }
1431         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1432
1433         return true;
1434 }
1435
1436 static void
1437 gen6_ring_put_irq(struct intel_engine_cs *ring)
1438 {
1439         struct drm_device *dev = ring->dev;
1440         struct drm_i915_private *dev_priv = dev->dev_private;
1441         unsigned long flags;
1442
1443         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1444         if (--ring->irq_refcount == 0) {
1445                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1446                         I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1447                 else
1448                         I915_WRITE_IMR(ring, ~0);
1449                 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1450         }
1451         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1452 }
1453
1454 static bool
1455 hsw_vebox_get_irq(struct intel_engine_cs *ring)
1456 {
1457         struct drm_device *dev = ring->dev;
1458         struct drm_i915_private *dev_priv = dev->dev_private;
1459         unsigned long flags;
1460
1461         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1462                 return false;
1463
1464         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1465         if (ring->irq_refcount++ == 0) {
1466                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1467                 gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1468         }
1469         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1470
1471         return true;
1472 }
1473
1474 static void
1475 hsw_vebox_put_irq(struct intel_engine_cs *ring)
1476 {
1477         struct drm_device *dev = ring->dev;
1478         struct drm_i915_private *dev_priv = dev->dev_private;
1479         unsigned long flags;
1480
1481         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1482         if (--ring->irq_refcount == 0) {
1483                 I915_WRITE_IMR(ring, ~0);
1484                 gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1485         }
1486         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1487 }
1488
1489 static bool
1490 gen8_ring_get_irq(struct intel_engine_cs *ring)
1491 {
1492         struct drm_device *dev = ring->dev;
1493         struct drm_i915_private *dev_priv = dev->dev_private;
1494         unsigned long flags;
1495
1496         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1497                 return false;
1498
1499         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1500         if (ring->irq_refcount++ == 0) {
1501                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1502                         I915_WRITE_IMR(ring,
1503                                        ~(ring->irq_enable_mask |
1504                                          GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1505                 } else {
1506                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1507                 }
1508                 POSTING_READ(RING_IMR(ring->mmio_base));
1509         }
1510         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1511
1512         return true;
1513 }
1514
1515 static void
1516 gen8_ring_put_irq(struct intel_engine_cs *ring)
1517 {
1518         struct drm_device *dev = ring->dev;
1519         struct drm_i915_private *dev_priv = dev->dev_private;
1520         unsigned long flags;
1521
1522         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1523         if (--ring->irq_refcount == 0) {
1524                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1525                         I915_WRITE_IMR(ring,
1526                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1527                 } else {
1528                         I915_WRITE_IMR(ring, ~0);
1529                 }
1530                 POSTING_READ(RING_IMR(ring->mmio_base));
1531         }
1532         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1533 }
1534
1535 static int
1536 i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1537                          u64 offset, u32 length,
1538                          unsigned flags)
1539 {
1540         int ret;
1541
1542         ret = intel_ring_begin(ring, 2);
1543         if (ret)
1544                 return ret;
1545
1546         intel_ring_emit(ring,
1547                         MI_BATCH_BUFFER_START |
1548                         MI_BATCH_GTT |
1549                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1550         intel_ring_emit(ring, offset);
1551         intel_ring_advance(ring);
1552
1553         return 0;
1554 }
1555
1556 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1557 #define I830_BATCH_LIMIT (256*1024)
1558 #define I830_TLB_ENTRIES (2)
1559 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
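     /*
      * i830/845 CS TLB workaround: first load I830_TLB_ENTRIES valid PTEs
      * via a dummy colour blit into the scratch bo, then, unless the batch
      * was dispatched with I915_DISPATCH_PINNED, copy it into that stable
      * scratch area and execute the copy with MI_BATCH_BUFFER.
      */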
1560 static int
1561 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1562                                 u64 offset, u32 len,
1563                                 unsigned flags)
1564 {
1565         u32 cs_offset = ring->scratch.gtt_offset;
1566         int ret;
1567
1568         ret = intel_ring_begin(ring, 6);
1569         if (ret)
1570                 return ret;
1571
1572         /* Evict the invalid PTE TLBs */
1573         intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1574         intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1575         intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1576         intel_ring_emit(ring, cs_offset);
1577         intel_ring_emit(ring, 0xdeadbeef);
1578         intel_ring_emit(ring, MI_NOOP);
1579         intel_ring_advance(ring);
1580
1581         if ((flags & I915_DISPATCH_PINNED) == 0) {
1582                 if (len > I830_BATCH_LIMIT)
1583                         return -ENOSPC;
1584
1585                 ret = intel_ring_begin(ring, 6 + 2);
1586                 if (ret)
1587                         return ret;
1588
1589                 /* Blit the batch (which now has all relocs applied) to the
1590                  * stable batch scratch bo area (so that the CS never
1591                  * stumbles over its TLB invalidation bug) ...
1592                  */
1593                 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1594                 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
1595                 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1596                 intel_ring_emit(ring, cs_offset);
1597                 intel_ring_emit(ring, 4096);
1598                 intel_ring_emit(ring, offset);
1599
1600                 intel_ring_emit(ring, MI_FLUSH);
1601                 intel_ring_emit(ring, MI_NOOP);
1602                 intel_ring_advance(ring);
1603
1604                 /* ... and execute it. */
1605                 offset = cs_offset;
1606         }
1607
1608         ret = intel_ring_begin(ring, 4);
1609         if (ret)
1610                 return ret;
1611
1612         intel_ring_emit(ring, MI_BATCH_BUFFER);
1613         intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1614         intel_ring_emit(ring, offset + len - 8);
1615         intel_ring_emit(ring, MI_NOOP);
1616         intel_ring_advance(ring);
1617
1618         return 0;
1619 }
1620
1621 static int
1622 i915_dispatch_execbuffer(struct intel_engine_cs *ring,
1623                          u64 offset, u32 len,
1624                          unsigned flags)
1625 {
1626         int ret;
1627
1628         ret = intel_ring_begin(ring, 2);
1629         if (ret)
1630                 return ret;
1631
1632         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1633         intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1634         intel_ring_advance(ring);
1635
1636         return 0;
1637 }
1638
1639 static void cleanup_status_page(struct intel_engine_cs *ring)
1640 {
1641         struct drm_i915_gem_object *obj;
1642
1643         obj = ring->status_page.obj;
1644         if (obj == NULL)
1645                 return;
1646
1647         kunmap(sg_page(obj->pages->sgl));
1648         i915_gem_object_ggtt_unpin(obj);
1649         drm_gem_object_unreference(&obj->base);
1650         ring->status_page.obj = NULL;
1651 }
1652
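     /*
      * The hardware status page is a single page, pinned into the GGTT and
      * kmapped for CPU access, into which the GPU writes seqnos and other
      * per-ring status (see I915_GEM_HWS_INDEX above).
      */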
1653 static int init_status_page(struct intel_engine_cs *ring)
1654 {
1655         struct drm_i915_gem_object *obj;
1656
1657         if ((obj = ring->status_page.obj) == NULL) {
1658                 unsigned flags;
1659                 int ret;
1660
1661                 obj = i915_gem_alloc_object(ring->dev, 4096);
1662                 if (obj == NULL) {
1663                         DRM_ERROR("Failed to allocate status page\n");
1664                         return -ENOMEM;
1665                 }
1666
1667                 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1668                 if (ret)
1669                         goto err_unref;
1670
1671                 flags = 0;
1672                 if (!HAS_LLC(ring->dev))
1673                         /* On g33, we cannot place HWS above 256MiB, so
1674                          * restrict its pinning to the low mappable arena.
1675                          * Though this restriction is not documented for
1676                          * gen4, gen5, or byt, they also behave similarly
1677                          * and hang if the HWS is placed at the top of the
1678                          * GTT. To generalise, it appears that all !llc
1679                          * platforms have issues with us placing the HWS
1680                          * above the mappable region (even though we never
1681                          * actually map it).
1682                          */
1683                         flags |= PIN_MAPPABLE;
1684                 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
1685                 if (ret) {
1686 err_unref:
1687                         drm_gem_object_unreference(&obj->base);
1688                         return ret;
1689                 }
1690
1691                 ring->status_page.obj = obj;
1692         }
1693
1694         ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1695         ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1696         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1697
1698         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1699                         ring->name, ring->status_page.gfx_addr);
1700
1701         return 0;
1702 }
1703
1704 static int init_phys_status_page(struct intel_engine_cs *ring)
1705 {
1706         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1707
1708         if (!dev_priv->status_page_dmah) {
1709                 dev_priv->status_page_dmah =
1710                         drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1711                 if (!dev_priv->status_page_dmah)
1712                         return -ENOMEM;
1713         }
1714
1715         ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1716         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1717
1718         return 0;
1719 }
1720
1721 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1722 {
1723         iounmap(ringbuf->virtual_start);
1724         ringbuf->virtual_start = NULL;
1725         i915_gem_object_ggtt_unpin(ringbuf->obj);
1726 }
1727
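     /*
      * Pin the ringbuffer object into the mappable GGTT aperture and
      * ioremap it write-combined so the CPU can write commands into it.
      */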
1728 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
1729                                      struct intel_ringbuffer *ringbuf)
1730 {
1731         struct drm_i915_private *dev_priv = to_i915(dev);
1732         struct drm_i915_gem_object *obj = ringbuf->obj;
1733         int ret;
1734
1735         ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1736         if (ret)
1737                 return ret;
1738
1739         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1740         if (ret) {
1741                 i915_gem_object_ggtt_unpin(obj);
1742                 return ret;
1743         }
1744
1745         ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
1746                         i915_gem_obj_ggtt_offset(obj), ringbuf->size);
1747         if (ringbuf->virtual_start == NULL) {
1748                 i915_gem_object_ggtt_unpin(obj);
1749                 return -EINVAL;
1750         }
1751
1752         return 0;
1753 }
1754
1755 void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1756 {
1757         drm_gem_object_unreference(&ringbuf->obj->base);
1758         ringbuf->obj = NULL;
1759 }
1760
1761 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1762                                struct intel_ringbuffer *ringbuf)
1763 {
1764         struct drm_i915_gem_object *obj;
1765
1766         obj = NULL;
1767         if (!HAS_LLC(dev))
1768                 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
1769         if (obj == NULL)
1770                 obj = i915_gem_alloc_object(dev, ringbuf->size);
1771         if (obj == NULL)
1772                 return -ENOMEM;
1773
1774         /* mark ring buffers as read-only from GPU side by default */
1775         obj->gt_ro = 1;
1776
1777         ringbuf->obj = obj;
1778
1779         return 0;
1780 }
1781
1782 static int intel_init_ring_buffer(struct drm_device *dev,
1783                                   struct intel_engine_cs *ring)
1784 {
1785         struct intel_ringbuffer *ringbuf = ring->buffer;
1786         int ret;
1787
1788         if (ringbuf == NULL) {
1789                 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1790                 if (!ringbuf)
1791                         return -ENOMEM;
1792                 ring->buffer = ringbuf;
1793         }
1794
1795         ring->dev = dev;
1796         INIT_LIST_HEAD(&ring->active_list);
1797         INIT_LIST_HEAD(&ring->request_list);
1798         INIT_LIST_HEAD(&ring->execlist_queue);
1799         ringbuf->size = 32 * PAGE_SIZE;
1800         ringbuf->ring = ring;
1801         memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
1802
1803         init_waitqueue_head(&ring->irq_queue);
1804
1805         if (I915_NEED_GFX_HWS(dev)) {
1806                 ret = init_status_page(ring);
1807                 if (ret)
1808                         goto error;
1809         } else {
1810                 BUG_ON(ring->id != RCS);
1811                 ret = init_phys_status_page(ring);
1812                 if (ret)
1813                         goto error;
1814         }
1815
1816         if (ringbuf->obj == NULL) {
1817                 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1818                 if (ret) {
1819                         DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
1820                                         ring->name, ret);
1821                         goto error;
1822                 }
1823
1824                 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
1825                 if (ret) {
1826                         DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
1827                                         ring->name, ret);
1828                         intel_destroy_ringbuffer_obj(ringbuf);
1829                         goto error;
1830                 }
1831         }
1832
1833         /* Workaround an erratum on the i830 which causes a hang if
1834          * the TAIL pointer points to within the last 2 cachelines
1835          * of the buffer.
1836          */
1837         ringbuf->effective_size = ringbuf->size;
1838         if (IS_I830(dev) || IS_845G(dev))
1839                 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
1840
1841         ret = i915_cmd_parser_init_ring(ring);
1842         if (ret)
1843                 goto error;
1844
1845         ret = ring->init(ring);
1846         if (ret)
1847                 goto error;
1848
1849         return 0;
1850
1851 error:
1852         kfree(ringbuf);
1853         ring->buffer = NULL;
1854         return ret;
1855 }
1856
1857 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1858 {
1859         struct drm_i915_private *dev_priv;
1860         struct intel_ringbuffer *ringbuf;
1861
1862         if (!intel_ring_initialized(ring))
1863                 return;
1864
1865         dev_priv = to_i915(ring->dev);
1866         ringbuf = ring->buffer;
1867
1868         intel_stop_ring_buffer(ring);
1869         WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
1870
1871         intel_unpin_ringbuffer_obj(ringbuf);
1872         intel_destroy_ringbuffer_obj(ringbuf);
1873         ring->preallocated_lazy_request = NULL;
1874         ring->outstanding_lazy_seqno = 0;
1875
1876         if (ring->cleanup)
1877                 ring->cleanup(ring);
1878
1879         cleanup_status_page(ring);
1880
1881         i915_cmd_parser_fini_ring(ring);
1882
1883         kfree(ringbuf);
1884         ring->buffer = NULL;
1885 }
1886
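     /*
      * Try to make at least @n bytes of ring space available, either by
      * reusing space already released by retired requests or by waiting
      * for the oldest outstanding request whose completion frees enough.
      */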
1887 static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1888 {
1889         struct intel_ringbuffer *ringbuf = ring->buffer;
1890         struct drm_i915_gem_request *request;
1891         u32 seqno = 0;
1892         int ret;
1893
1894         if (ringbuf->last_retired_head != -1) {
1895                 ringbuf->head = ringbuf->last_retired_head;
1896                 ringbuf->last_retired_head = -1;
1897
1898                 ringbuf->space = intel_ring_space(ringbuf);
1899                 if (ringbuf->space >= n)
1900                         return 0;
1901         }
1902
1903         list_for_each_entry(request, &ring->request_list, list) {
1904                 if (__intel_ring_space(request->tail, ringbuf->tail,
1905                                        ringbuf->size) >= n) {
1906                         seqno = request->seqno;
1907                         break;
1908                 }
1909         }
1910
1911         if (seqno == 0)
1912                 return -ENOSPC;
1913
1914         ret = i915_wait_seqno(ring, seqno);
1915         if (ret)
1916                 return ret;
1917
1918         i915_gem_retire_requests_ring(ring);
1919         ringbuf->head = ringbuf->last_retired_head;
1920         ringbuf->last_retired_head = -1;
1921
1922         ringbuf->space = intel_ring_space(ringbuf);
1923         return 0;
1924 }
1925
1926 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1927 {
1928         struct drm_device *dev = ring->dev;
1929         struct drm_i915_private *dev_priv = dev->dev_private;
1930         struct intel_ringbuffer *ringbuf = ring->buffer;
1931         unsigned long end;
1932         int ret;
1933
1934         ret = intel_ring_wait_request(ring, n);
1935         if (ret != -ENOSPC)
1936                 return ret;
1937
1938         /* force the tail write in case we have been skipping them */
1939         __intel_ring_advance(ring);
1940
1941         /* With GEM the hangcheck timer should kick us out of the loop;
1942          * leaving it early runs the risk of corrupting GEM state (due
1943          * to running on almost untested codepaths). But on resume
1944          * timers don't work yet, so prevent a complete hang in that
1945          * case by choosing an insanely large timeout. */
1946         end = jiffies + 60 * HZ;
1947
1948         trace_i915_ring_wait_begin(ring);
1949         do {
1950                 ringbuf->head = I915_READ_HEAD(ring);
1951                 ringbuf->space = intel_ring_space(ringbuf);
1952                 if (ringbuf->space >= n) {
1953                         ret = 0;
1954                         break;
1955                 }
1956
1957                 msleep(1);
1958
1959                 if (dev_priv->mm.interruptible && signal_pending(current)) {
1960                         ret = -ERESTARTSYS;
1961                         break;
1962                 }
1963
1964                 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1965                                            dev_priv->mm.interruptible);
1966                 if (ret)
1967                         break;
1968
1969                 if (time_after(jiffies, end)) {
1970                         ret = -EBUSY;
1971                         break;
1972                 }
1973         } while (1);
1974         trace_i915_ring_wait_end(ring);
1975         return ret;
1976 }
1977
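     /*
      * Fill the remainder of the ring with MI_NOOPs so that the next
      * commands start again at the top of the buffer and no command
      * packet straddles the wrap point.
      */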
1978 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1979 {
1980         uint32_t __iomem *virt;
1981         struct intel_ringbuffer *ringbuf = ring->buffer;
1982         int rem = ringbuf->size - ringbuf->tail;
1983
1984         if (ringbuf->space < rem) {
1985                 int ret = ring_wait_for_space(ring, rem);
1986                 if (ret)
1987                         return ret;
1988         }
1989
1990         virt = ringbuf->virtual_start + ringbuf->tail;
1991         rem /= 4;
1992         while (rem--)
1993                 iowrite32(MI_NOOP, virt++);
1994
1995         ringbuf->tail = 0;
1996         ringbuf->space = intel_ring_space(ringbuf);
1997
1998         return 0;
1999 }
2000
2001 int intel_ring_idle(struct intel_engine_cs *ring)
2002 {
2003         u32 seqno;
2004         int ret;
2005
2006         /* We need to add any requests required to flush the objects and ring */
2007         if (ring->outstanding_lazy_seqno) {
2008                 ret = i915_add_request(ring, NULL);
2009                 if (ret)
2010                         return ret;
2011         }
2012
2013         /* Wait upon the last request to be completed */
2014         if (list_empty(&ring->request_list))
2015                 return 0;
2016
2017         seqno = list_entry(ring->request_list.prev,
2018                            struct drm_i915_gem_request,
2019                            list)->seqno;
2020
2021         return i915_wait_seqno(ring, seqno);
2022 }
2023
2024 static int
2025 intel_ring_alloc_seqno(struct intel_engine_cs *ring)
2026 {
2027         if (ring->outstanding_lazy_seqno)
2028                 return 0;
2029
2030         if (ring->preallocated_lazy_request == NULL) {
2031                 struct drm_i915_gem_request *request;
2032
2033                 request = kmalloc(sizeof(*request), GFP_KERNEL);
2034                 if (request == NULL)
2035                         return -ENOMEM;
2036
2037                 ring->preallocated_lazy_request = request;
2038         }
2039
2040         return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
2041 }
2042
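     /*
      * Ensure @bytes of contiguous ring space are available: wrap to the
      * start of the buffer if the write would run past effective_size,
      * then stall for space if the ring is still too full.
      */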
2043 static int __intel_ring_prepare(struct intel_engine_cs *ring,
2044                                 int bytes)
2045 {
2046         struct intel_ringbuffer *ringbuf = ring->buffer;
2047         int ret;
2048
2049         if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
2050                 ret = intel_wrap_ring_buffer(ring);
2051                 if (unlikely(ret))
2052                         return ret;
2053         }
2054
2055         if (unlikely(ringbuf->space < bytes)) {
2056                 ret = ring_wait_for_space(ring, bytes);
2057                 if (unlikely(ret))
2058                         return ret;
2059         }
2060
2061         return 0;
2062 }
2063
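     /*
      * Reserve space for @num_dwords dwords of commands. The caller is
      * expected to emit exactly that many dwords and then advance, e.g.
      * (minimal sketch of the pattern used throughout this file):
      *
      *     ret = intel_ring_begin(ring, 2);
      *     if (ret)
      *             return ret;
      *     intel_ring_emit(ring, MI_NOOP);
      *     intel_ring_emit(ring, MI_NOOP);
      *     intel_ring_advance(ring);
      */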
2064 int intel_ring_begin(struct intel_engine_cs *ring,
2065                      int num_dwords)
2066 {
2067         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2068         int ret;
2069
2070         ret = i915_gem_check_wedge(&dev_priv->gpu_error,
2071                                    dev_priv->mm.interruptible);
2072         if (ret)
2073                 return ret;
2074
2075         ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
2076         if (ret)
2077                 return ret;
2078
2079         /* Preallocate the outstanding lazy request (olr) before touching the ring */
2080         ret = intel_ring_alloc_seqno(ring);
2081         if (ret)
2082                 return ret;
2083
2084         ring->buffer->space -= num_dwords * sizeof(uint32_t);
2085         return 0;
2086 }
2087
2088 /* Align the ring tail to a cacheline boundary */
2089 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
2090 {
2091         int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
2092         int ret;
2093
2094         if (num_dwords == 0)
2095                 return 0;
2096
2097         num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
2098         ret = intel_ring_begin(ring, num_dwords);
2099         if (ret)
2100                 return ret;
2101
2102         while (num_dwords--)
2103                 intel_ring_emit(ring, MI_NOOP);
2104
2105         intel_ring_advance(ring);
2106
2107         return 0;
2108 }
2109
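     /*
      * Force the ring's current seqno; on gen6/7 the per-ring semaphore
      * sync (mailbox) registers are cleared as well so that stale values
      * are not carried over into the new seqno space.
      */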
2110 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
2111 {
2112         struct drm_device *dev = ring->dev;
2113         struct drm_i915_private *dev_priv = dev->dev_private;
2114
2115         BUG_ON(ring->outstanding_lazy_seqno);
2116
2117         if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
2118                 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
2119                 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
2120                 if (HAS_VEBOX(dev))
2121                         I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
2122         }
2123
2124         ring->set_seqno(ring, seqno);
2125         ring->hangcheck.seqno = seqno;
2126 }
2127
2128 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
2129                                      u32 value)
2130 {
2131         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2132
2133         /* Every tail move must follow the sequence below */
2134
2135         /* Disable notification that the ring is IDLE. The GT
2136          * will then assume that it is busy and bring it out of rc6.
2137          */
2138         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
2139                    _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2140
2141         /* Clear the context id. Here be magic! */
2142         I915_WRITE64(GEN6_BSD_RNCID, 0x0);
2143
2144         /* Wait for the ring not to be idle, i.e. for it to wake up. */
2145         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
2146                       GEN6_BSD_SLEEP_INDICATOR) == 0,
2147                      50))
2148                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2149
2150         /* Now that the ring is fully powered up, update the tail */
2151         I915_WRITE_TAIL(ring, value);
2152         POSTING_READ(RING_TAIL(ring->mmio_base));
2153
2154         /* Let the ring send IDLE messages to the GT again,
2155          * and so let it sleep to conserve power when idle.
2156          */
2157         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
2158                    _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2159 }
2160
2161 static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
2162                                u32 invalidate, u32 flush)
2163 {
2164         uint32_t cmd;
2165         int ret;
2166
2167         ret = intel_ring_begin(ring, 4);
2168         if (ret)
2169                 return ret;
2170
2171         cmd = MI_FLUSH_DW;
2172         if (INTEL_INFO(ring->dev)->gen >= 8)
2173                 cmd += 1;
2174         /*
2175          * Bspec vol 1c.5 - video engine command streamer:
2176          * "If ENABLED, all TLBs will be invalidated once the flush
2177          * operation is complete. This bit is only valid when the
2178          * Post-Sync Operation field is a value of 1h or 3h."
2179          */
2180         if (invalidate & I915_GEM_GPU_DOMAINS)
2181                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
2182                         MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2183         intel_ring_emit(ring, cmd);
2184         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2185         if (INTEL_INFO(ring->dev)->gen >= 8) {
2186                 intel_ring_emit(ring, 0); /* upper addr */
2187                 intel_ring_emit(ring, 0); /* value */
2188         } else {
2189                 intel_ring_emit(ring, 0);
2190                 intel_ring_emit(ring, MI_NOOP);
2191         }
2192         intel_ring_advance(ring);
2193         return 0;
2194 }
2195
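     /*
      * On gen8, MI_BATCH_BUFFER_START takes a 64-bit batch address split
      * across two dwords; bit 8 selects PPGTT vs GGTT addressing.
      */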
2196 static int
2197 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2198                               u64 offset, u32 len,
2199                               unsigned flags)
2200 {
2201         bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
2202         int ret;
2203
2204         ret = intel_ring_begin(ring, 4);
2205         if (ret)
2206                 return ret;
2207
2208         /* FIXME(BDW): Address space and security selectors. */
2209         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
2210         intel_ring_emit(ring, lower_32_bits(offset));
2211         intel_ring_emit(ring, upper_32_bits(offset));
2212         intel_ring_emit(ring, MI_NOOP);
2213         intel_ring_advance(ring);
2214
2215         return 0;
2216 }
2217
2218 static int
2219 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2220                               u64 offset, u32 len,
2221                               unsigned flags)
2222 {
2223         int ret;
2224
2225         ret = intel_ring_begin(ring, 2);
2226         if (ret)
2227                 return ret;
2228
2229         intel_ring_emit(ring,
2230                         MI_BATCH_BUFFER_START |
2231                         (flags & I915_DISPATCH_SECURE ?
2232                          0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
2233         /* bits 0-7 are the length on GEN6+ */
2234         intel_ring_emit(ring, offset);
2235         intel_ring_advance(ring);
2236
2237         return 0;
2238 }
2239
2240 static int
2241 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2242                               u64 offset, u32 len,
2243                               unsigned flags)
2244 {
2245         int ret;
2246
2247         ret = intel_ring_begin(ring, 2);
2248         if (ret)
2249                 return ret;
2250
2251         intel_ring_emit(ring,
2252                         MI_BATCH_BUFFER_START |
2253                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
2254         /* bits 0-7 are the length on GEN6+ */
2255         intel_ring_emit(ring, offset);
2256         intel_ring_advance(ring);
2257
2258         return 0;
2259 }
2260
2261 /* Blitter support (SandyBridge+) */
2262
2263 static int gen6_ring_flush(struct intel_engine_cs *ring,
2264                            u32 invalidate, u32 flush)
2265 {
2266         struct drm_device *dev = ring->dev;
2267         struct drm_i915_private *dev_priv = dev->dev_private;
2268         uint32_t cmd;
2269         int ret;
2270
2271         ret = intel_ring_begin(ring, 4);
2272         if (ret)
2273                 return ret;
2274
2275         cmd = MI_FLUSH_DW;
2276         if (INTEL_INFO(ring->dev)->gen >= 8)
2277                 cmd += 1;
2278         /*
2279          * Bspec vol 1c.3 - blitter engine command streamer:
2280          * "If ENABLED, all TLBs will be invalidated once the flush
2281          * operation is complete. This bit is only valid when the
2282          * Post-Sync Operation field is a value of 1h or 3h."
2283          */
2284         if (invalidate & I915_GEM_DOMAIN_RENDER)
2285                 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
2286                         MI_FLUSH_DW_OP_STOREDW;
2287         intel_ring_emit(ring, cmd);
2288         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2289         if (INTEL_INFO(ring->dev)->gen >= 8) {
2290                 intel_ring_emit(ring, 0); /* upper addr */
2291                 intel_ring_emit(ring, 0); /* value */
2292         } else {
2293                 intel_ring_emit(ring, 0);
2294                 intel_ring_emit(ring, MI_NOOP);
2295         }
2296         intel_ring_advance(ring);
2297
2298         if (!invalidate && flush) {
2299                 if (IS_GEN7(dev))
2300                         return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
2301                 else if (IS_BROADWELL(dev))
2302                         dev_priv->fbc.need_sw_cache_clean = true;
2303         }
2304
2305         return 0;
2306 }
2307
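     /*
      * Fill in the render ring's per-generation vfuncs (flush, add_request,
      * irq handling, dispatch), allocate the gen8 semaphore signalling bo
      * when semaphores are enabled, and set up the CS TLB workaround batch
      * on i830/845 before handing over to the common ring init.
      */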
2308 int intel_init_render_ring_buffer(struct drm_device *dev)
2309 {
2310         struct drm_i915_private *dev_priv = dev->dev_private;
2311         struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2312         struct drm_i915_gem_object *obj;
2313         int ret;
2314
2315         ring->name = "render ring";
2316         ring->id = RCS;
2317         ring->mmio_base = RENDER_RING_BASE;
2318
2319         if (INTEL_INFO(dev)->gen >= 8) {
2320                 if (i915_semaphore_is_enabled(dev)) {
2321                         obj = i915_gem_alloc_object(dev, 4096);
2322                         if (obj == NULL) {
2323                                 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2324                                 i915.semaphores = 0;
2325                         } else {
2326                                 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2327                                 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2328                                 if (ret != 0) {
2329                                         drm_gem_object_unreference(&obj->base);
2330                                         DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2331                                         i915.semaphores = 0;
2332                                 } else
2333                                         dev_priv->semaphore_obj = obj;
2334                         }
2335                 }
2336
2337                 ring->init_context = intel_ring_workarounds_emit;
2338                 ring->add_request = gen6_add_request;
2339                 ring->flush = gen8_render_ring_flush;
2340                 ring->irq_get = gen8_ring_get_irq;
2341                 ring->irq_put = gen8_ring_put_irq;
2342                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2343                 ring->get_seqno = gen6_ring_get_seqno;
2344                 ring->set_seqno = ring_set_seqno;
2345                 if (i915_semaphore_is_enabled(dev)) {
2346                         WARN_ON(!dev_priv->semaphore_obj);
2347                         ring->semaphore.sync_to = gen8_ring_sync;
2348                         ring->semaphore.signal = gen8_rcs_signal;
2349                         GEN8_RING_SEMAPHORE_INIT;
2350                 }
2351         } else if (INTEL_INFO(dev)->gen >= 6) {
2352                 ring->add_request = gen6_add_request;
2353                 ring->flush = gen7_render_ring_flush;
2354                 if (INTEL_INFO(dev)->gen == 6)
2355                         ring->flush = gen6_render_ring_flush;
2356                 ring->irq_get = gen6_ring_get_irq;
2357                 ring->irq_put = gen6_ring_put_irq;
2358                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2359                 ring->get_seqno = gen6_ring_get_seqno;
2360                 ring->set_seqno = ring_set_seqno;
2361                 if (i915_semaphore_is_enabled(dev)) {
2362                         ring->semaphore.sync_to = gen6_ring_sync;
2363                         ring->semaphore.signal = gen6_signal;
2364                         /*
2365                          * The current semaphore is only applied on pre-gen8
2366                          * platform.  And there is no VCS2 ring on the pre-gen8
2367                          * platform. So the semaphore between RCS and VCS2 is
2368                          * initialized as INVALID.  Gen8 will initialize the
2369                          * sema between VCS2 and RCS later.
2370                          */
2371                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2372                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
2373                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
2374                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
2375                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2376                         ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2377                         ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
2378                         ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2379                         ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2380                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2381                 }
2382         } else if (IS_GEN5(dev)) {
2383                 ring->add_request = pc_render_add_request;
2384                 ring->flush = gen4_render_ring_flush;
2385                 ring->get_seqno = pc_render_get_seqno;
2386                 ring->set_seqno = pc_render_set_seqno;
2387                 ring->irq_get = gen5_ring_get_irq;
2388                 ring->irq_put = gen5_ring_put_irq;
2389                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
2390                                         GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
2391         } else {
2392                 ring->add_request = i9xx_add_request;
2393                 if (INTEL_INFO(dev)->gen < 4)
2394                         ring->flush = gen2_render_ring_flush;
2395                 else
2396                         ring->flush = gen4_render_ring_flush;
2397                 ring->get_seqno = ring_get_seqno;
2398                 ring->set_seqno = ring_set_seqno;
2399                 if (IS_GEN2(dev)) {
2400                         ring->irq_get = i8xx_ring_get_irq;
2401                         ring->irq_put = i8xx_ring_put_irq;
2402                 } else {
2403                         ring->irq_get = i9xx_ring_get_irq;
2404                         ring->irq_put = i9xx_ring_put_irq;
2405                 }
2406                 ring->irq_enable_mask = I915_USER_INTERRUPT;
2407         }
2408         ring->write_tail = ring_write_tail;
2409
2410         if (IS_HASWELL(dev))
2411                 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2412         else if (IS_GEN8(dev))
2413                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2414         else if (INTEL_INFO(dev)->gen >= 6)
2415                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2416         else if (INTEL_INFO(dev)->gen >= 4)
2417                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2418         else if (IS_I830(dev) || IS_845G(dev))
2419                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2420         else
2421                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2422         ring->init = init_render_ring;
2423         ring->cleanup = render_ring_cleanup;
2424
2425         /* Workaround batchbuffer to combat CS TLB bug. */
2426         if (HAS_BROKEN_CS_TLB(dev)) {
2427                 obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
2428                 if (obj == NULL) {
2429                         DRM_ERROR("Failed to allocate batch bo\n");
2430                         return -ENOMEM;
2431                 }
2432
2433                 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
2434                 if (ret != 0) {
2435                         drm_gem_object_unreference(&obj->base);
2436                         DRM_ERROR("Failed to pin batch bo\n");
2437                         return ret;
2438                 }
2439
2440                 ring->scratch.obj = obj;
2441                 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
2442         }
2443
2444         return intel_init_ring_buffer(dev, ring);
2445 }
2446
2447 int intel_init_bsd_ring_buffer(struct drm_device *dev)
2448 {
2449         struct drm_i915_private *dev_priv = dev->dev_private;
2450         struct intel_engine_cs *ring = &dev_priv->ring[VCS];
2451
2452         ring->name = "bsd ring";
2453         ring->id = VCS;
2454
2455         ring->write_tail = ring_write_tail;
2456         if (INTEL_INFO(dev)->gen >= 6) {
2457                 ring->mmio_base = GEN6_BSD_RING_BASE;
2458                 /* gen6 bsd needs a special wa for tail updates */
2459                 if (IS_GEN6(dev))
2460                         ring->write_tail = gen6_bsd_ring_write_tail;
2461                 ring->flush = gen6_bsd_ring_flush;
2462                 ring->add_request = gen6_add_request;
2463                 ring->get_seqno = gen6_ring_get_seqno;
2464                 ring->set_seqno = ring_set_seqno;
2465                 if (INTEL_INFO(dev)->gen >= 8) {
2466                         ring->irq_enable_mask =
2467                                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2468                         ring->irq_get = gen8_ring_get_irq;
2469                         ring->irq_put = gen8_ring_put_irq;
2470                         ring->dispatch_execbuffer =
2471                                 gen8_ring_dispatch_execbuffer;
2472                         if (i915_semaphore_is_enabled(dev)) {
2473                                 ring->semaphore.sync_to = gen8_ring_sync;
2474                                 ring->semaphore.signal = gen8_xcs_signal;
2475                                 GEN8_RING_SEMAPHORE_INIT;
2476                         }
2477                 } else {
2478                         ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2479                         ring->irq_get = gen6_ring_get_irq;
2480                         ring->irq_put = gen6_ring_put_irq;
2481                         ring->dispatch_execbuffer =
2482                                 gen6_ring_dispatch_execbuffer;
2483                         if (i915_semaphore_is_enabled(dev)) {
2484                                 ring->semaphore.sync_to = gen6_ring_sync;
2485                                 ring->semaphore.signal = gen6_signal;
2486                                 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2487                                 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2488                                 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2489                                 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2490                                 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2491                                 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2492                                 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2493                                 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2494                                 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2495                                 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2496                         }
2497                 }
2498         } else {
2499                 ring->mmio_base = BSD_RING_BASE;
2500                 ring->flush = bsd_ring_flush;
2501                 ring->add_request = i9xx_add_request;
2502                 ring->get_seqno = ring_get_seqno;
2503                 ring->set_seqno = ring_set_seqno;
2504                 if (IS_GEN5(dev)) {
2505                         ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2506                         ring->irq_get = gen5_ring_get_irq;
2507                         ring->irq_put = gen5_ring_put_irq;
2508                 } else {
2509                         ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2510                         ring->irq_get = i9xx_ring_get_irq;
2511                         ring->irq_put = i9xx_ring_put_irq;
2512                 }
2513                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2514         }
2515         ring->init = init_ring_common;
2516
2517         return intel_init_ring_buffer(dev, ring);
2518 }
2519
2520 /**
2521  * Initialize the second BSD ring (VCS2).
2522  * Note that this ring only exists on Broadwell GT3.
2523  */
2524 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2525 {
2526         struct drm_i915_private *dev_priv = dev->dev_private;
2527         struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
2528
2529         if (INTEL_INFO(dev)->gen != 8) {
2530                 DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
2531                 return -EINVAL;
2532         }
2533
2534         ring->name = "bsd2 ring";
2535         ring->id = VCS2;
2536
2537         ring->write_tail = ring_write_tail;
2538         ring->mmio_base = GEN8_BSD2_RING_BASE;
2539         ring->flush = gen6_bsd_ring_flush;
2540         ring->add_request = gen6_add_request;
2541         ring->get_seqno = gen6_ring_get_seqno;
2542         ring->set_seqno = ring_set_seqno;
2543         ring->irq_enable_mask =
2544                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
2545         ring->irq_get = gen8_ring_get_irq;
2546         ring->irq_put = gen8_ring_put_irq;
2547         ring->dispatch_execbuffer =
2548                         gen8_ring_dispatch_execbuffer;
2549         if (i915_semaphore_is_enabled(dev)) {
2550                 ring->semaphore.sync_to = gen8_ring_sync;
2551                 ring->semaphore.signal = gen8_xcs_signal;
2552                 GEN8_RING_SEMAPHORE_INIT;
2553         }
2554         ring->init = init_ring_common;
2555
2556         return intel_init_ring_buffer(dev, ring);
2557 }
2558
2559 int intel_init_blt_ring_buffer(struct drm_device *dev)
2560 {
2561         struct drm_i915_private *dev_priv = dev->dev_private;
2562         struct intel_engine_cs *ring = &dev_priv->ring[BCS];
2563
2564         ring->name = "blitter ring";
2565         ring->id = BCS;
2566
2567         ring->mmio_base = BLT_RING_BASE;
2568         ring->write_tail = ring_write_tail;
2569         ring->flush = gen6_ring_flush;
2570         ring->add_request = gen6_add_request;
2571         ring->get_seqno = gen6_ring_get_seqno;
2572         ring->set_seqno = ring_set_seqno;
2573         if (INTEL_INFO(dev)->gen >= 8) {
2574                 ring->irq_enable_mask =
2575                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2576                 ring->irq_get = gen8_ring_get_irq;
2577                 ring->irq_put = gen8_ring_put_irq;
2578                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2579                 if (i915_semaphore_is_enabled(dev)) {
2580                         ring->semaphore.sync_to = gen8_ring_sync;
2581                         ring->semaphore.signal = gen8_xcs_signal;
2582                         GEN8_RING_SEMAPHORE_INIT;
2583                 }
2584         } else {
2585                 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2586                 ring->irq_get = gen6_ring_get_irq;
2587                 ring->irq_put = gen6_ring_put_irq;
2588                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2589                 if (i915_semaphore_is_enabled(dev)) {
2590                         ring->semaphore.signal = gen6_signal;
2591                         ring->semaphore.sync_to = gen6_ring_sync;
2592                         /*
2593                          * The current semaphore is only applied on pre-gen8
2594                          * platform.  And there is no VCS2 ring on the pre-gen8
2595                          * platform. So the semaphore between BCS and VCS2 is
2596                          * initialized as INVALID.  Gen8 will initialize the
2597                          * sema between BCS and VCS2 later.
2598                          */
2599                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2600                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2601                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2602                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2603                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2604                         ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2605                         ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2606                         ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2607                         ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2608                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2609                 }
2610         }
2611         ring->init = init_ring_common;
2612
2613         return intel_init_ring_buffer(dev, ring);
2614 }
2615
2616 int intel_init_vebox_ring_buffer(struct drm_device *dev)
2617 {
2618         struct drm_i915_private *dev_priv = dev->dev_private;
2619         struct intel_engine_cs *ring = &dev_priv->ring[VECS];
2620
2621         ring->name = "video enhancement ring";
2622         ring->id = VECS;
2623
2624         ring->mmio_base = VEBOX_RING_BASE;
2625         ring->write_tail = ring_write_tail;
2626         ring->flush = gen6_ring_flush;
2627         ring->add_request = gen6_add_request;
2628         ring->get_seqno = gen6_ring_get_seqno;
2629         ring->set_seqno = ring_set_seqno;
2630
2631         if (INTEL_INFO(dev)->gen >= 8) {
2632                 ring->irq_enable_mask =
2633                         GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2634                 ring->irq_get = gen8_ring_get_irq;
2635                 ring->irq_put = gen8_ring_put_irq;
2636                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2637                 if (i915_semaphore_is_enabled(dev)) {
2638                         ring->semaphore.sync_to = gen8_ring_sync;
2639                         ring->semaphore.signal = gen8_xcs_signal;
2640                         GEN8_RING_SEMAPHORE_INIT;
2641                 }
2642         } else {
2643                 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2644                 ring->irq_get = hsw_vebox_get_irq;
2645                 ring->irq_put = hsw_vebox_put_irq;
2646                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2647                 if (i915_semaphore_is_enabled(dev)) {
2648                         ring->semaphore.sync_to = gen6_ring_sync;
2649                         ring->semaphore.signal = gen6_signal;
2650                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2651                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2652                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2653                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2654                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2655                         ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2656                         ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2657                         ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2658                         ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2659                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2660                 }
2661         }
2662         ring->init = init_ring_common;
2663
2664         return intel_init_ring_buffer(dev, ring);
2665 }
2666
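     /*
      * Emit a flush of the GPU write domains if a previous request left
      * the caches dirty, clearing gpu_caches_dirty once the flush has
      * been queued.
      */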
2667 int
2668 intel_ring_flush_all_caches(struct intel_engine_cs *ring)
2669 {
2670         int ret;
2671
2672         if (!ring->gpu_caches_dirty)
2673                 return 0;
2674
2675         ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
2676         if (ret)
2677                 return ret;
2678
2679         trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
2680
2681         ring->gpu_caches_dirty = false;
2682         return 0;
2683 }
2684
2685 int
2686 intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
2687 {
2688         uint32_t flush_domains;
2689         int ret;
2690
2691         flush_domains = 0;
2692         if (ring->gpu_caches_dirty)
2693                 flush_domains = I915_GEM_GPU_DOMAINS;
2694
2695         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2696         if (ret)
2697                 return ret;
2698
2699         trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2700
2701         ring->gpu_caches_dirty = false;
2702         return 0;
2703 }
2704
2705 void
2706 intel_stop_ring_buffer(struct intel_engine_cs *ring)
2707 {
2708         int ret;
2709
2710         if (!intel_ring_initialized(ring))
2711                 return;
2712
2713         ret = intel_ring_idle(ring);
2714         if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
2715                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
2716                           ring->name, ret);
2717
2718         stop_ring(ring);
2719 }