drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include "i915_drv.h"
32 #include <drm/i915_drm.h>
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35
36 static inline int ring_space(struct intel_ring_buffer *ring)
37 {
38         int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
39         if (space < 0)
40                 space += ring->size;
41         return space;
42 }
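/*
 * Worked example of the arithmetic above (illustrative values, assuming the
 * 64-byte I915_RING_FREE_SPACE reserve): with ring->size = 0x8000, head =
 * 0x100 and tail = 0x7f00, the raw difference 0x100 - (0x7f00 + 64) is
 * -0x7e40; it is negative whenever the tail sits ahead of the head, so
 * ring->size is added to account for the wrap, giving 0x1c0 (448) bytes of
 * usable space.
 */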
43
44 void __intel_ring_advance(struct intel_ring_buffer *ring)
45 {
46         struct drm_i915_private *dev_priv = ring->dev->dev_private;
47
48         ring->tail &= ring->size - 1;
49         if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
50                 return;
51         ring->write_tail(ring, ring->tail);
52 }
53
54 static int
55 gen2_render_ring_flush(struct intel_ring_buffer *ring,
56                        u32      invalidate_domains,
57                        u32      flush_domains)
58 {
59         u32 cmd;
60         int ret;
61
62         cmd = MI_FLUSH;
63         if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
64                 cmd |= MI_NO_WRITE_FLUSH;
65
66         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
67                 cmd |= MI_READ_FLUSH;
68
69         ret = intel_ring_begin(ring, 2);
70         if (ret)
71                 return ret;
72
73         intel_ring_emit(ring, cmd);
74         intel_ring_emit(ring, MI_NOOP);
75         intel_ring_advance(ring);
76
77         return 0;
78 }
79
80 static int
81 gen4_render_ring_flush(struct intel_ring_buffer *ring,
82                        u32      invalidate_domains,
83                        u32      flush_domains)
84 {
85         struct drm_device *dev = ring->dev;
86         u32 cmd;
87         int ret;
88
89         /*
90          * read/write caches:
91          *
92          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
93          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
94          * also flushed at 2d versus 3d pipeline switches.
95          *
96          * read-only caches:
97          *
98          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
99          * MI_READ_FLUSH is set, and is always flushed on 965.
100          *
101          * I915_GEM_DOMAIN_COMMAND may not exist?
102          *
103          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
104          * invalidated when MI_EXE_FLUSH is set.
105          *
106          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
107          * invalidated with every MI_FLUSH.
108          *
109          * TLBs:
110          *
111          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
112          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
113          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
114          * are flushed at any MI_FLUSH.
115          */
116
117         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
118         if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
119                 cmd &= ~MI_NO_WRITE_FLUSH;
120         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
121                 cmd |= MI_EXE_FLUSH;
122
123         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
124             (IS_G4X(dev) || IS_GEN5(dev)))
125                 cmd |= MI_INVALIDATE_ISP;
126
127         ret = intel_ring_begin(ring, 2);
128         if (ret)
129                 return ret;
130
131         intel_ring_emit(ring, cmd);
132         intel_ring_emit(ring, MI_NOOP);
133         intel_ring_advance(ring);
134
135         return 0;
136 }
137
138 /**
139  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
140  * implementing two workarounds on gen6.  From section 1.4.7.1
141  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
142  *
143  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
144  * produced by non-pipelined state commands), software needs to first
145  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
146  * 0.
147  *
148  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
149  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
150  *
151  * And the workaround for these two requires this workaround first:
152  *
153  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
154  * BEFORE the pipe-control with a post-sync op and no write-cache
155  * flushes.
156  *
157  * And this last workaround is tricky because of the requirements on
158  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
159  * volume 2 part 1:
160  *
161  *     "1 of the following must also be set:
162  *      - Render Target Cache Flush Enable ([12] of DW1)
163  *      - Depth Cache Flush Enable ([0] of DW1)
164  *      - Stall at Pixel Scoreboard ([1] of DW1)
165  *      - Depth Stall ([13] of DW1)
166  *      - Post-Sync Operation ([13] of DW1)
167  *      - Notify Enable ([8] of DW1)"
168  *
169  * The cache flushes require the workaround flush that triggered this
170  * one, so we can't use it.  Depth stall would trigger the same.
171  * Post-sync nonzero is what triggered this second workaround, so we
172  * can't use that one either.  Notify enable is IRQs, which aren't
173  * really our business.  That leaves only stall at scoreboard.
174  */
175 static int
176 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
177 {
178         u32 scratch_addr = ring->scratch.gtt_offset + 128;
179         int ret;
180
181
182         ret = intel_ring_begin(ring, 6);
183         if (ret)
184                 return ret;
185
186         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
187         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
188                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
189         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
190         intel_ring_emit(ring, 0); /* low dword */
191         intel_ring_emit(ring, 0); /* high dword */
192         intel_ring_emit(ring, MI_NOOP);
193         intel_ring_advance(ring);
194
195         ret = intel_ring_begin(ring, 6);
196         if (ret)
197                 return ret;
198
199         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
200         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
201         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
202         intel_ring_emit(ring, 0);
203         intel_ring_emit(ring, 0);
204         intel_ring_emit(ring, MI_NOOP);
205         intel_ring_advance(ring);
206
207         return 0;
208 }
209
210 static int
211 gen6_render_ring_flush(struct intel_ring_buffer *ring,
212                          u32 invalidate_domains, u32 flush_domains)
213 {
214         u32 flags = 0;
215         u32 scratch_addr = ring->scratch.gtt_offset + 128;
216         int ret;
217
218         /* Force SNB workarounds for PIPE_CONTROL flushes */
219         ret = intel_emit_post_sync_nonzero_flush(ring);
220         if (ret)
221                 return ret;
222
223         /* Just flush everything.  Experiments have shown that reducing the
224          * number of bits based on the write domains has little performance
225          * impact.
226          */
227         if (flush_domains) {
228                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
229                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
230                 /*
231                  * Ensure that any following seqno writes only happen
232                  * when the render cache is indeed flushed.
233                  */
234                 flags |= PIPE_CONTROL_CS_STALL;
235         }
236         if (invalidate_domains) {
237                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
238                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
239                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
240                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
241                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
242                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
243                 /*
244                  * TLB invalidate requires a post-sync write.
245                  */
246                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
247         }
248
249         ret = intel_ring_begin(ring, 4);
250         if (ret)
251                 return ret;
252
253         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
254         intel_ring_emit(ring, flags);
255         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
256         intel_ring_emit(ring, 0);
257         intel_ring_advance(ring);
258
259         return 0;
260 }
261
262 static int
263 gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
264 {
265         int ret;
266
267         ret = intel_ring_begin(ring, 4);
268         if (ret)
269                 return ret;
270
271         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
272         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
273                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
274         intel_ring_emit(ring, 0);
275         intel_ring_emit(ring, 0);
276         intel_ring_advance(ring);
277
278         return 0;
279 }
280
281 static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
282 {
283         int ret;
284
285         if (!ring->fbc_dirty)
286                 return 0;
287
288         ret = intel_ring_begin(ring, 6);
289         if (ret)
290                 return ret;
291         /* WaFbcNukeOn3DBlt:ivb/hsw */
292         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
293         intel_ring_emit(ring, MSG_FBC_REND_STATE);
294         intel_ring_emit(ring, value);
295         intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
296         intel_ring_emit(ring, MSG_FBC_REND_STATE);
297         intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
298         intel_ring_advance(ring);
299
300         ring->fbc_dirty = false;
301         return 0;
302 }
303
304 static int
305 gen7_render_ring_flush(struct intel_ring_buffer *ring,
306                        u32 invalidate_domains, u32 flush_domains)
307 {
308         u32 flags = 0;
309         u32 scratch_addr = ring->scratch.gtt_offset + 128;
310         int ret;
311
312         /*
313          * Ensure that any following seqno writes only happen when the render
314          * cache is indeed flushed.
315          *
316          * Workaround: 4th PIPE_CONTROL command (except the ones with only
317          * read-cache invalidate bits set) must have the CS_STALL bit set. We
318          * don't try to be clever and just set it unconditionally.
319          */
320         flags |= PIPE_CONTROL_CS_STALL;
321
322         /* Just flush everything.  Experiments have shown that reducing the
323          * number of bits based on the write domains has little performance
324          * impact.
325          */
326         if (flush_domains) {
327                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
328                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
329         }
330         if (invalidate_domains) {
331                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
332                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
333                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
334                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
335                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
336                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
337                 /*
338                  * TLB invalidate requires a post-sync write.
339                  */
340                 flags |= PIPE_CONTROL_QW_WRITE;
341                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
342
343                 /* Workaround: we must issue a pipe_control with CS-stall bit
344                  * set before a pipe_control command that has the state cache
345                  * invalidate bit set. */
346                 gen7_render_ring_cs_stall_wa(ring);
347         }
348
349         ret = intel_ring_begin(ring, 4);
350         if (ret)
351                 return ret;
352
353         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
354         intel_ring_emit(ring, flags);
355         intel_ring_emit(ring, scratch_addr);
356         intel_ring_emit(ring, 0);
357         intel_ring_advance(ring);
358
359         if (!invalidate_domains && flush_domains)
360                 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
361
362         return 0;
363 }
364
365 static int
366 gen8_render_ring_flush(struct intel_ring_buffer *ring,
367                        u32 invalidate_domains, u32 flush_domains)
368 {
369         u32 flags = 0;
370         u32 scratch_addr = ring->scratch.gtt_offset + 128;
371         int ret;
372
373         flags |= PIPE_CONTROL_CS_STALL;
374
375         if (flush_domains) {
376                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
377                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
378         }
379         if (invalidate_domains) {
380                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
381                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
382                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
383                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
384                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
385                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
386                 flags |= PIPE_CONTROL_QW_WRITE;
387                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
388         }
389
390         ret = intel_ring_begin(ring, 6);
391         if (ret)
392                 return ret;
393
394         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
395         intel_ring_emit(ring, flags);
396         intel_ring_emit(ring, scratch_addr);
397         intel_ring_emit(ring, 0);
398         intel_ring_emit(ring, 0);
399         intel_ring_emit(ring, 0);
400         intel_ring_advance(ring);
401
402         return 0;
403
404 }
405
406 static void ring_write_tail(struct intel_ring_buffer *ring,
407                             u32 value)
408 {
409         drm_i915_private_t *dev_priv = ring->dev->dev_private;
410         I915_WRITE_TAIL(ring, value);
411 }
412
413 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
414 {
415         drm_i915_private_t *dev_priv = ring->dev->dev_private;
416         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
417                         RING_ACTHD(ring->mmio_base) : ACTHD;
418
419         return I915_READ(acthd_reg);
420 }
421
422 static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
423 {
424         struct drm_i915_private *dev_priv = ring->dev->dev_private;
425         u32 addr;
426
427         addr = dev_priv->status_page_dmah->busaddr;
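        /*
         * The OR below folds physical address bits [35:32] into bits [7:4]
         * of the value written to HWS_PGA; presumably this is how gen4+
         * parts accept a status page located above 4GiB.
         */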
428         if (INTEL_INFO(ring->dev)->gen >= 4)
429                 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
430         I915_WRITE(HWS_PGA, addr);
431 }
432
433 static int init_ring_common(struct intel_ring_buffer *ring)
434 {
435         struct drm_device *dev = ring->dev;
436         drm_i915_private_t *dev_priv = dev->dev_private;
437         struct drm_i915_gem_object *obj = ring->obj;
438         int ret = 0;
439         u32 head;
440
441         gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
442
443         if (I915_NEED_GFX_HWS(dev))
444                 intel_ring_setup_status_page(ring);
445         else
446                 ring_setup_phys_status_page(ring);
447
448         /* Stop the ring if it's running. */
449         I915_WRITE_CTL(ring, 0);
450         I915_WRITE_HEAD(ring, 0);
451         ring->write_tail(ring, 0);
452
453         head = I915_READ_HEAD(ring) & HEAD_ADDR;
454
455         /* G45 ring initialization fails to reset head to zero */
456         if (head != 0) {
457                 DRM_DEBUG_KMS("%s head not reset to zero "
458                               "ctl %08x head %08x tail %08x start %08x\n",
459                               ring->name,
460                               I915_READ_CTL(ring),
461                               I915_READ_HEAD(ring),
462                               I915_READ_TAIL(ring),
463                               I915_READ_START(ring));
464
465                 I915_WRITE_HEAD(ring, 0);
466
467                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
468                         DRM_ERROR("failed to set %s head to zero "
469                                   "ctl %08x head %08x tail %08x start %08x\n",
470                                   ring->name,
471                                   I915_READ_CTL(ring),
472                                   I915_READ_HEAD(ring),
473                                   I915_READ_TAIL(ring),
474                                   I915_READ_START(ring));
475                 }
476         }
477
478         /* Initialize the ring. This must happen _after_ we've cleared the ring
479          * registers with the above sequence (the readback of the HEAD registers
480          * also enforces ordering), otherwise the hw might lose the new ring
481          * register values. */
482         I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
483         I915_WRITE_CTL(ring,
484                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
485                         | RING_VALID);
486
487         /* If the head is still not zero, the ring is dead */
488         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
489                      I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
490                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
491                 DRM_ERROR("%s initialization failed "
492                                 "ctl %08x head %08x tail %08x start %08x\n",
493                                 ring->name,
494                                 I915_READ_CTL(ring),
495                                 I915_READ_HEAD(ring),
496                                 I915_READ_TAIL(ring),
497                                 I915_READ_START(ring));
498                 ret = -EIO;
499                 goto out;
500         }
501
502         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
503                 i915_kernel_lost_context(ring->dev);
504         else {
505                 ring->head = I915_READ_HEAD(ring);
506                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
507                 ring->space = ring_space(ring);
508                 ring->last_retired_head = -1;
509         }
510
511         memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
512
513 out:
514         gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
515
516         return ret;
517 }
518
519 static int
520 init_pipe_control(struct intel_ring_buffer *ring)
521 {
522         int ret;
523
524         if (ring->scratch.obj)
525                 return 0;
526
527         ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
528         if (ring->scratch.obj == NULL) {
529                 DRM_ERROR("Failed to allocate seqno page\n");
530                 ret = -ENOMEM;
531                 goto err;
532         }
533
534         i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
535
536         ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
537         if (ret)
538                 goto err_unref;
539
540         ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
541         ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
542         if (ring->scratch.cpu_page == NULL) {
543                 ret = -ENOMEM;
544                 goto err_unpin;
545         }
546
547         DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
548                          ring->name, ring->scratch.gtt_offset);
549         return 0;
550
551 err_unpin:
552         i915_gem_object_ggtt_unpin(ring->scratch.obj);
553 err_unref:
554         drm_gem_object_unreference(&ring->scratch.obj->base);
555 err:
556         return ret;
557 }
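/*
 * Summary of how the 4096-byte scratch page allocated above is used, inferred
 * from the other users of ring->scratch in this file: offset 0 receives the
 * seqno written by pc_render_add_request() and is read back through
 * scratch.cpu_page[0] in pc_render_get_seqno(); offset 128 is the post-sync
 * write target of the gen6/gen7 PIPE_CONTROL flushes, and
 * pc_render_add_request() additionally scribbles the successive 128-byte
 * cachelines up to offset 768 for the Ironlake qword-write workaround; offset
 * 256 is also the destination of the MI_STORE_REGISTER_MEM in
 * gen7_ring_fbc_flush().
 */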
558
559 static int init_render_ring(struct intel_ring_buffer *ring)
560 {
561         struct drm_device *dev = ring->dev;
562         struct drm_i915_private *dev_priv = dev->dev_private;
563         int ret = init_ring_common(ring);
564
565         if (INTEL_INFO(dev)->gen > 3)
566                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
567
568         /* We need to disable the AsyncFlip performance optimisations in order
569          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
570          * programmed to '1' on all products.
571          *
572          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
573          */
574         if (INTEL_INFO(dev)->gen >= 6)
575                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
576
577         /* Required for the hardware to program scanline values for waiting */
578         if (INTEL_INFO(dev)->gen == 6)
579                 I915_WRITE(GFX_MODE,
580                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
581
582         if (IS_GEN7(dev))
583                 I915_WRITE(GFX_MODE_GEN7,
584                            _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
585                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
586
587         if (INTEL_INFO(dev)->gen >= 5) {
588                 ret = init_pipe_control(ring);
589                 if (ret)
590                         return ret;
591         }
592
593         if (IS_GEN6(dev)) {
594                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
595                  * "If this bit is set, STCunit will have LRA as replacement
596                  *  policy. [...] This bit must be reset.  LRA replacement
597                  *  policy is not supported."
598                  */
599                 I915_WRITE(CACHE_MODE_0,
600                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
601
602                 /* This is not explicitly set for GEN6, so read the register.
603                  * see intel_ring_mi_set_context() for why we care.
604                  * TODO: consider explicitly setting the bit for GEN5
605                  */
606                 ring->itlb_before_ctx_switch =
607                         !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
608         }
609
610         if (INTEL_INFO(dev)->gen >= 6)
611                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
612
613         if (HAS_L3_DPF(dev))
614                 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
615
616         return ret;
617 }
618
619 static void render_ring_cleanup(struct intel_ring_buffer *ring)
620 {
621         struct drm_device *dev = ring->dev;
622
623         if (ring->scratch.obj == NULL)
624                 return;
625
626         if (INTEL_INFO(dev)->gen >= 5) {
627                 kunmap(sg_page(ring->scratch.obj->pages->sgl));
628                 i915_gem_object_ggtt_unpin(ring->scratch.obj);
629         }
630
631         drm_gem_object_unreference(&ring->scratch.obj->base);
632         ring->scratch.obj = NULL;
633 }
634
635 static void
636 update_mboxes(struct intel_ring_buffer *ring,
637               u32 mmio_offset)
638 {
639 /* NB: In order to be able to do semaphore MBOX updates for varying number
640  * of rings, it's easiest if we round up each individual update to a
641  * multiple of 2 (since ring updates must always be a multiple of 2)
642  * even though the actual update only requires 3 dwords.
643  */
644 #define MBOX_UPDATE_DWORDS 4
645         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
646         intel_ring_emit(ring, mmio_offset);
647         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
648         intel_ring_emit(ring, MI_NOOP);
649 }
650
651 /**
652  * gen6_add_request - Update the semaphore mailbox registers
653  *
654  * @ring: ring that is adding a request
656  *
657  * Update the mailbox registers in the *other* rings with the current seqno.
658  * This acts like a signal in the canonical semaphore.
659  */
660 static int
661 gen6_add_request(struct intel_ring_buffer *ring)
662 {
663         struct drm_device *dev = ring->dev;
664         struct drm_i915_private *dev_priv = dev->dev_private;
665         struct intel_ring_buffer *useless;
666         int i, ret, num_dwords = 4;
667
668         if (i915_semaphore_is_enabled(dev))
669                 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
670 #undef MBOX_UPDATE_DWORDS
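        /*
         * Worked example of the dword budget above: the request itself takes
         * 4 dwords (MI_STORE_DWORD_INDEX, the HWS index, the seqno and
         * MI_USER_INTERRUPT); with semaphores enabled every other ring gets a
         * 3-dword mailbox update padded to MBOX_UPDATE_DWORDS (4) so the
         * total stays a multiple of 2, e.g. 4 + 3 * 4 = 16 dwords for a
         * four-ring configuration.
         */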
671
672         ret = intel_ring_begin(ring, num_dwords);
673         if (ret)
674                 return ret;
675
676         if (i915_semaphore_is_enabled(dev)) {
677                 for_each_ring(useless, dev_priv, i) {
678                         u32 mbox_reg = ring->signal_mbox[i];
679                         if (mbox_reg != GEN6_NOSYNC)
680                                 update_mboxes(ring, mbox_reg);
681                 }
682         }
683
684         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
685         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
686         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
687         intel_ring_emit(ring, MI_USER_INTERRUPT);
688         __intel_ring_advance(ring);
689
690         return 0;
691 }
692
693 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
694                                               u32 seqno)
695 {
696         struct drm_i915_private *dev_priv = dev->dev_private;
697         return dev_priv->last_seqno < seqno;
698 }
699
700 /**
701  * gen6_ring_sync - sync the waiter to the signaller on seqno
702  *
703  * @waiter: ring that is waiting
704  * @signaller: ring which has signalled, or will signal
705  * @seqno: seqno which the waiter will block on
706  */
707 static int
708 gen6_ring_sync(struct intel_ring_buffer *waiter,
709                struct intel_ring_buffer *signaller,
710                u32 seqno)
711 {
712         int ret;
713         u32 dw1 = MI_SEMAPHORE_MBOX |
714                   MI_SEMAPHORE_COMPARE |
715                   MI_SEMAPHORE_REGISTER;
716
717         /* Throughout all of the GEM code, seqno passed implies our current
718          * seqno is >= the last seqno executed. However for hardware the
719          * comparison is strictly greater than.
720          */
721         seqno -= 1;
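        /*
         * For example, a waiter that must not run before seqno 100 has been
         * executed emits 99 here; the semaphore compare then releases the
         * waiter once the signaller's mailbox is strictly greater than 99,
         * i.e. once 100 (or anything later) has been written.
         */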
722
723         WARN_ON(signaller->semaphore_register[waiter->id] ==
724                 MI_SEMAPHORE_SYNC_INVALID);
725
726         ret = intel_ring_begin(waiter, 4);
727         if (ret)
728                 return ret;
729
730         /* If seqno wrap happened, omit the wait with no-ops */
731         if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
732                 intel_ring_emit(waiter,
733                                 dw1 |
734                                 signaller->semaphore_register[waiter->id]);
735                 intel_ring_emit(waiter, seqno);
736                 intel_ring_emit(waiter, 0);
737                 intel_ring_emit(waiter, MI_NOOP);
738         } else {
739                 intel_ring_emit(waiter, MI_NOOP);
740                 intel_ring_emit(waiter, MI_NOOP);
741                 intel_ring_emit(waiter, MI_NOOP);
742                 intel_ring_emit(waiter, MI_NOOP);
743         }
744         intel_ring_advance(waiter);
745
746         return 0;
747 }
748
749 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
750 do {                                                                    \
751         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
752                  PIPE_CONTROL_DEPTH_STALL);                             \
753         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
754         intel_ring_emit(ring__, 0);                                                     \
755         intel_ring_emit(ring__, 0);                                                     \
756 } while (0)
757
758 static int
759 pc_render_add_request(struct intel_ring_buffer *ring)
760 {
761         u32 scratch_addr = ring->scratch.gtt_offset + 128;
762         int ret;
763
764         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
765          * incoherent with writes to memory, i.e. completely fubar,
766          * so we need to use PIPE_NOTIFY instead.
767          *
768          * However, we also need to workaround the qword write
769          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
770          * memory before requesting an interrupt.
771          */
772         ret = intel_ring_begin(ring, 32);
773         if (ret)
774                 return ret;
775
776         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
777                         PIPE_CONTROL_WRITE_FLUSH |
778                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
779         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
780         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
781         intel_ring_emit(ring, 0);
782         PIPE_CONTROL_FLUSH(ring, scratch_addr);
783         scratch_addr += 128; /* write to separate cachelines */
784         PIPE_CONTROL_FLUSH(ring, scratch_addr);
785         scratch_addr += 128;
786         PIPE_CONTROL_FLUSH(ring, scratch_addr);
787         scratch_addr += 128;
788         PIPE_CONTROL_FLUSH(ring, scratch_addr);
789         scratch_addr += 128;
790         PIPE_CONTROL_FLUSH(ring, scratch_addr);
791         scratch_addr += 128;
792         PIPE_CONTROL_FLUSH(ring, scratch_addr);
793
794         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
795                         PIPE_CONTROL_WRITE_FLUSH |
796                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
797                         PIPE_CONTROL_NOTIFY);
798         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
799         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
800         intel_ring_emit(ring, 0);
801         __intel_ring_advance(ring);
802
803         return 0;
804 }
805
806 static u32
807 gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
808 {
809         /* Workaround to force correct ordering between irq and seqno writes on
810          * ivb (and maybe also on snb) by reading from a CS register (like
811          * ACTHD) before reading the status page. */
812         if (!lazy_coherency)
813                 intel_ring_get_active_head(ring);
814         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
815 }
816
817 static u32
818 ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
819 {
820         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
821 }
822
823 static void
824 ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
825 {
826         intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
827 }
828
829 static u32
830 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
831 {
832         return ring->scratch.cpu_page[0];
833 }
834
835 static void
836 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
837 {
838         ring->scratch.cpu_page[0] = seqno;
839 }
840
841 static bool
842 gen5_ring_get_irq(struct intel_ring_buffer *ring)
843 {
844         struct drm_device *dev = ring->dev;
845         drm_i915_private_t *dev_priv = dev->dev_private;
846         unsigned long flags;
847
848         if (!dev->irq_enabled)
849                 return false;
850
851         spin_lock_irqsave(&dev_priv->irq_lock, flags);
852         if (ring->irq_refcount++ == 0)
853                 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
854         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
855
856         return true;
857 }
858
859 static void
860 gen5_ring_put_irq(struct intel_ring_buffer *ring)
861 {
862         struct drm_device *dev = ring->dev;
863         drm_i915_private_t *dev_priv = dev->dev_private;
864         unsigned long flags;
865
866         spin_lock_irqsave(&dev_priv->irq_lock, flags);
867         if (--ring->irq_refcount == 0)
868                 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
869         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
870 }
871
872 static bool
873 i9xx_ring_get_irq(struct intel_ring_buffer *ring)
874 {
875         struct drm_device *dev = ring->dev;
876         drm_i915_private_t *dev_priv = dev->dev_private;
877         unsigned long flags;
878
879         if (!dev->irq_enabled)
880                 return false;
881
882         spin_lock_irqsave(&dev_priv->irq_lock, flags);
883         if (ring->irq_refcount++ == 0) {
884                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
885                 I915_WRITE(IMR, dev_priv->irq_mask);
886                 POSTING_READ(IMR);
887         }
888         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
889
890         return true;
891 }
892
893 static void
894 i9xx_ring_put_irq(struct intel_ring_buffer *ring)
895 {
896         struct drm_device *dev = ring->dev;
897         drm_i915_private_t *dev_priv = dev->dev_private;
898         unsigned long flags;
899
900         spin_lock_irqsave(&dev_priv->irq_lock, flags);
901         if (--ring->irq_refcount == 0) {
902                 dev_priv->irq_mask |= ring->irq_enable_mask;
903                 I915_WRITE(IMR, dev_priv->irq_mask);
904                 POSTING_READ(IMR);
905         }
906         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
907 }
908
909 static bool
910 i8xx_ring_get_irq(struct intel_ring_buffer *ring)
911 {
912         struct drm_device *dev = ring->dev;
913         drm_i915_private_t *dev_priv = dev->dev_private;
914         unsigned long flags;
915
916         if (!dev->irq_enabled)
917                 return false;
918
919         spin_lock_irqsave(&dev_priv->irq_lock, flags);
920         if (ring->irq_refcount++ == 0) {
921                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
922                 I915_WRITE16(IMR, dev_priv->irq_mask);
923                 POSTING_READ16(IMR);
924         }
925         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
926
927         return true;
928 }
929
930 static void
931 i8xx_ring_put_irq(struct intel_ring_buffer *ring)
932 {
933         struct drm_device *dev = ring->dev;
934         drm_i915_private_t *dev_priv = dev->dev_private;
935         unsigned long flags;
936
937         spin_lock_irqsave(&dev_priv->irq_lock, flags);
938         if (--ring->irq_refcount == 0) {
939                 dev_priv->irq_mask |= ring->irq_enable_mask;
940                 I915_WRITE16(IMR, dev_priv->irq_mask);
941                 POSTING_READ16(IMR);
942         }
943         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
944 }
945
946 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
947 {
948         struct drm_device *dev = ring->dev;
949         drm_i915_private_t *dev_priv = ring->dev->dev_private;
950         u32 mmio = 0;
951
952         /* The ring status page addresses are no longer next to the rest of
953          * the ring registers as of gen7.
954          */
955         if (IS_GEN7(dev)) {
956                 switch (ring->id) {
957                 case RCS:
958                         mmio = RENDER_HWS_PGA_GEN7;
959                         break;
960                 case BCS:
961                         mmio = BLT_HWS_PGA_GEN7;
962                         break;
963                 case VCS:
964                         mmio = BSD_HWS_PGA_GEN7;
965                         break;
966                 case VECS:
967                         mmio = VEBOX_HWS_PGA_GEN7;
968                         break;
969                 }
970         } else if (IS_GEN6(ring->dev)) {
971                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
972         } else {
973                 /* XXX: gen8 returns to sanity */
974                 mmio = RING_HWS_PGA(ring->mmio_base);
975         }
976
977         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
978         POSTING_READ(mmio);
979
980         /* Flush the TLB for this page */
981         if (INTEL_INFO(dev)->gen >= 6) {
982                 u32 reg = RING_INSTPM(ring->mmio_base);
983                 I915_WRITE(reg,
984                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
985                                               INSTPM_SYNC_FLUSH));
986                 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
987                              1000))
988                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
989                                   ring->name);
990         }
991 }
992
993 static int
994 bsd_ring_flush(struct intel_ring_buffer *ring,
995                u32     invalidate_domains,
996                u32     flush_domains)
997 {
998         int ret;
999
1000         ret = intel_ring_begin(ring, 2);
1001         if (ret)
1002                 return ret;
1003
1004         intel_ring_emit(ring, MI_FLUSH);
1005         intel_ring_emit(ring, MI_NOOP);
1006         intel_ring_advance(ring);
1007         return 0;
1008 }
1009
1010 static int
1011 i9xx_add_request(struct intel_ring_buffer *ring)
1012 {
1013         int ret;
1014
1015         ret = intel_ring_begin(ring, 4);
1016         if (ret)
1017                 return ret;
1018
1019         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1020         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1021         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1022         intel_ring_emit(ring, MI_USER_INTERRUPT);
1023         __intel_ring_advance(ring);
1024
1025         return 0;
1026 }
1027
1028 static bool
1029 gen6_ring_get_irq(struct intel_ring_buffer *ring)
1030 {
1031         struct drm_device *dev = ring->dev;
1032         drm_i915_private_t *dev_priv = dev->dev_private;
1033         unsigned long flags;
1034
1035         if (!dev->irq_enabled)
1036                return false;
1037
1038         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1039         if (ring->irq_refcount++ == 0) {
1040                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1041                         I915_WRITE_IMR(ring,
1042                                        ~(ring->irq_enable_mask |
1043                                          GT_PARITY_ERROR(dev)));
1044                 else
1045                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1046                 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1047         }
1048         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1049
1050         return true;
1051 }
1052
1053 static void
1054 gen6_ring_put_irq(struct intel_ring_buffer *ring)
1055 {
1056         struct drm_device *dev = ring->dev;
1057         drm_i915_private_t *dev_priv = dev->dev_private;
1058         unsigned long flags;
1059
1060         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1061         if (--ring->irq_refcount == 0) {
1062                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1063                         I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1064                 else
1065                         I915_WRITE_IMR(ring, ~0);
1066                 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1067         }
1068         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1069 }
1070
1071 static bool
1072 hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1073 {
1074         struct drm_device *dev = ring->dev;
1075         struct drm_i915_private *dev_priv = dev->dev_private;
1076         unsigned long flags;
1077
1078         if (!dev->irq_enabled)
1079                 return false;
1080
1081         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1082         if (ring->irq_refcount++ == 0) {
1083                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1084                 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1085         }
1086         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1087
1088         return true;
1089 }
1090
1091 static void
1092 hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1093 {
1094         struct drm_device *dev = ring->dev;
1095         struct drm_i915_private *dev_priv = dev->dev_private;
1096         unsigned long flags;
1097
1098         if (!dev->irq_enabled)
1099                 return;
1100
1101         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1102         if (--ring->irq_refcount == 0) {
1103                 I915_WRITE_IMR(ring, ~0);
1104                 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1105         }
1106         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1107 }
1108
1109 static bool
1110 gen8_ring_get_irq(struct intel_ring_buffer *ring)
1111 {
1112         struct drm_device *dev = ring->dev;
1113         struct drm_i915_private *dev_priv = dev->dev_private;
1114         unsigned long flags;
1115
1116         if (!dev->irq_enabled)
1117                 return false;
1118
1119         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1120         if (ring->irq_refcount++ == 0) {
1121                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1122                         I915_WRITE_IMR(ring,
1123                                        ~(ring->irq_enable_mask |
1124                                          GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1125                 } else {
1126                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1127                 }
1128                 POSTING_READ(RING_IMR(ring->mmio_base));
1129         }
1130         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1131
1132         return true;
1133 }
1134
1135 static void
1136 gen8_ring_put_irq(struct intel_ring_buffer *ring)
1137 {
1138         struct drm_device *dev = ring->dev;
1139         struct drm_i915_private *dev_priv = dev->dev_private;
1140         unsigned long flags;
1141
1142         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1143         if (--ring->irq_refcount == 0) {
1144                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1145                         I915_WRITE_IMR(ring,
1146                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1147                 } else {
1148                         I915_WRITE_IMR(ring, ~0);
1149                 }
1150                 POSTING_READ(RING_IMR(ring->mmio_base));
1151         }
1152         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1153 }
1154
1155 static int
1156 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1157                          u32 offset, u32 length,
1158                          unsigned flags)
1159 {
1160         int ret;
1161
1162         ret = intel_ring_begin(ring, 2);
1163         if (ret)
1164                 return ret;
1165
1166         intel_ring_emit(ring,
1167                         MI_BATCH_BUFFER_START |
1168                         MI_BATCH_GTT |
1169                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1170         intel_ring_emit(ring, offset);
1171         intel_ring_advance(ring);
1172
1173         return 0;
1174 }
1175
1176 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1177 #define I830_BATCH_LIMIT (256*1024)
1178 static int
1179 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1180                                 u32 offset, u32 len,
1181                                 unsigned flags)
1182 {
1183         int ret;
1184
1185         if (flags & I915_DISPATCH_PINNED) {
1186                 ret = intel_ring_begin(ring, 4);
1187                 if (ret)
1188                         return ret;
1189
1190                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1191                 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1192                 intel_ring_emit(ring, offset + len - 8);
1193                 intel_ring_emit(ring, MI_NOOP);
1194                 intel_ring_advance(ring);
1195         } else {
1196                 u32 cs_offset = ring->scratch.gtt_offset;
1197
1198                 if (len > I830_BATCH_LIMIT)
1199                         return -ENOSPC;
1200
1201                 ret = intel_ring_begin(ring, 9+3);
1202                 if (ret)
1203                         return ret;
1204                 /* Blit the batch (which has now all relocs applied) to the stable batch
1205                  * scratch bo area (so that the CS never stumbles over its tlb
1206                  * invalidation bug) ... */
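                /*
                 * The blit below is set up, roughly, as a 32bpp copy that is
                 * 1024 pixels (4096 bytes) wide and DIV_ROUND_UP(len, 4096)
                 * rows tall, with 4096-byte source and destination pitches,
                 * so that the whole batch lands in the scratch bo.
                 */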
1207                 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1208                                 XY_SRC_COPY_BLT_WRITE_ALPHA |
1209                                 XY_SRC_COPY_BLT_WRITE_RGB);
1210                 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1211                 intel_ring_emit(ring, 0);
1212                 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1213                 intel_ring_emit(ring, cs_offset);
1214                 intel_ring_emit(ring, 0);
1215                 intel_ring_emit(ring, 4096);
1216                 intel_ring_emit(ring, offset);
1217                 intel_ring_emit(ring, MI_FLUSH);
1218
1219                 /* ... and execute it. */
1220                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1221                 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1222                 intel_ring_emit(ring, cs_offset + len - 8);
1223                 intel_ring_advance(ring);
1224         }
1225
1226         return 0;
1227 }
1228
1229 static int
1230 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1231                          u32 offset, u32 len,
1232                          unsigned flags)
1233 {
1234         int ret;
1235
1236         ret = intel_ring_begin(ring, 2);
1237         if (ret)
1238                 return ret;
1239
1240         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1241         intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1242         intel_ring_advance(ring);
1243
1244         return 0;
1245 }
1246
1247 static void cleanup_status_page(struct intel_ring_buffer *ring)
1248 {
1249         struct drm_i915_gem_object *obj;
1250
1251         obj = ring->status_page.obj;
1252         if (obj == NULL)
1253                 return;
1254
1255         kunmap(sg_page(obj->pages->sgl));
1256         i915_gem_object_ggtt_unpin(obj);
1257         drm_gem_object_unreference(&obj->base);
1258         ring->status_page.obj = NULL;
1259 }
1260
1261 static int init_status_page(struct intel_ring_buffer *ring)
1262 {
1263         struct drm_device *dev = ring->dev;
1264         struct drm_i915_gem_object *obj;
1265         int ret;
1266
1267         obj = i915_gem_alloc_object(dev, 4096);
1268         if (obj == NULL) {
1269                 DRM_ERROR("Failed to allocate status page\n");
1270                 ret = -ENOMEM;
1271                 goto err;
1272         }
1273
1274         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1275
1276         ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
1277         if (ret != 0) {
1278                 goto err_unref;
1279         }
1280
1281         ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1282         ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1283         if (ring->status_page.page_addr == NULL) {
1284                 ret = -ENOMEM;
1285                 goto err_unpin;
1286         }
1287         ring->status_page.obj = obj;
1288         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1289
1290         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1291                         ring->name, ring->status_page.gfx_addr);
1292
1293         return 0;
1294
1295 err_unpin:
1296         i915_gem_object_ggtt_unpin(obj);
1297 err_unref:
1298         drm_gem_object_unreference(&obj->base);
1299 err:
1300         return ret;
1301 }
1302
1303 static int init_phys_status_page(struct intel_ring_buffer *ring)
1304 {
1305         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1306
1307         if (!dev_priv->status_page_dmah) {
1308                 dev_priv->status_page_dmah =
1309                         drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1310                 if (!dev_priv->status_page_dmah)
1311                         return -ENOMEM;
1312         }
1313
1314         ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1315         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1316
1317         return 0;
1318 }
1319
1320 static int intel_init_ring_buffer(struct drm_device *dev,
1321                                   struct intel_ring_buffer *ring)
1322 {
1323         struct drm_i915_gem_object *obj;
1324         struct drm_i915_private *dev_priv = dev->dev_private;
1325         int ret;
1326
1327         ring->dev = dev;
1328         INIT_LIST_HEAD(&ring->active_list);
1329         INIT_LIST_HEAD(&ring->request_list);
1330         ring->size = 32 * PAGE_SIZE;
1331         memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
1332
1333         init_waitqueue_head(&ring->irq_queue);
1334
1335         if (I915_NEED_GFX_HWS(dev)) {
1336                 ret = init_status_page(ring);
1337                 if (ret)
1338                         return ret;
1339         } else {
1340                 BUG_ON(ring->id != RCS);
1341                 ret = init_phys_status_page(ring);
1342                 if (ret)
1343                         return ret;
1344         }
1345
1346         obj = NULL;
1347         if (!HAS_LLC(dev))
1348                 obj = i915_gem_object_create_stolen(dev, ring->size);
1349         if (obj == NULL)
1350                 obj = i915_gem_alloc_object(dev, ring->size);
1351         if (obj == NULL) {
1352                 DRM_ERROR("Failed to allocate ringbuffer\n");
1353                 ret = -ENOMEM;
1354                 goto err_hws;
1355         }
1356
1357         ring->obj = obj;
1358
1359         ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
1360         if (ret)
1361                 goto err_unref;
1362
1363         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1364         if (ret)
1365                 goto err_unpin;
1366
1367         ring->virtual_start =
1368                 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1369                            ring->size);
1370         if (ring->virtual_start == NULL) {
1371                 DRM_ERROR("Failed to map ringbuffer.\n");
1372                 ret = -EINVAL;
1373                 goto err_unpin;
1374         }
1375
1376         ret = ring->init(ring);
1377         if (ret)
1378                 goto err_unmap;
1379
1380         /* Workaround an erratum on the i830 which causes a hang if
1381          * the TAIL pointer points to within the last 2 cachelines
1382          * of the buffer.
1383          */
1384         ring->effective_size = ring->size;
1385         if (IS_I830(ring->dev) || IS_845G(ring->dev))
1386                 ring->effective_size -= 128;
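                /* i.e. keep the tail out of the final two 64-byte cachelines */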
1387
1388         return 0;
1389
1390 err_unmap:
1391         iounmap(ring->virtual_start);
1392 err_unpin:
1393         i915_gem_object_ggtt_unpin(obj);
1394 err_unref:
1395         drm_gem_object_unreference(&obj->base);
1396         ring->obj = NULL;
1397 err_hws:
1398         cleanup_status_page(ring);
1399         return ret;
1400 }
1401
1402 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1403 {
1404         struct drm_i915_private *dev_priv;
1405         int ret;
1406
1407         if (ring->obj == NULL)
1408                 return;
1409
1410         /* Disable the ring buffer. The ring must be idle at this point */
1411         dev_priv = ring->dev->dev_private;
1412         ret = intel_ring_idle(ring);
1413         if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
1414                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1415                           ring->name, ret);
1416
1417         I915_WRITE_CTL(ring, 0);
1418
1419         iounmap(ring->virtual_start);
1420
1421         i915_gem_object_ggtt_unpin(ring->obj);
1422         drm_gem_object_unreference(&ring->obj->base);
1423         ring->obj = NULL;
1424         ring->preallocated_lazy_request = NULL;
1425         ring->outstanding_lazy_seqno = 0;
1426
1427         if (ring->cleanup)
1428                 ring->cleanup(ring);
1429
1430         cleanup_status_page(ring);
1431 }
1432
1433 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1434 {
1435         int ret;
1436
1437         ret = i915_wait_seqno(ring, seqno);
1438         if (!ret)
1439                 i915_gem_retire_requests_ring(ring);
1440
1441         return ret;
1442 }
1443
1444 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1445 {
1446         struct drm_i915_gem_request *request;
1447         u32 seqno = 0;
1448         int ret;
1449
1450         i915_gem_retire_requests_ring(ring);
1451
1452         if (ring->last_retired_head != -1) {
1453                 ring->head = ring->last_retired_head;
1454                 ring->last_retired_head = -1;
1455                 ring->space = ring_space(ring);
1456                 if (ring->space >= n)
1457                         return 0;
1458         }
1459
1460         list_for_each_entry(request, &ring->request_list, list) {
1461                 int space;
1462
1463                 if (request->tail == -1)
1464                         continue;
1465
1466                 space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
1467                 if (space < 0)
1468                         space += ring->size;
1469                 if (space >= n) {
1470                         seqno = request->seqno;
1471                         break;
1472                 }
1473
1474                 /* Consume this request in case we need more space than
1475                  * is available and so need to prevent a race between
1476                  * updating last_retired_head and direct reads of
1477                  * I915_RING_HEAD. It also provides a nice sanity check.
1478                  */
1479                 request->tail = -1;
1480         }
1481
1482         if (seqno == 0)
1483                 return -ENOSPC;
1484
1485         ret = intel_ring_wait_seqno(ring, seqno);
1486         if (ret)
1487                 return ret;
1488
1489         if (WARN_ON(ring->last_retired_head == -1))
1490                 return -ENOSPC;
1491
1492         ring->head = ring->last_retired_head;
1493         ring->last_retired_head = -1;
1494         ring->space = ring_space(ring);
1495         if (WARN_ON(ring->space < n))
1496                 return -ENOSPC;
1497
1498         return 0;
1499 }
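/*
 * Summary of the reuse strategy above: rather than waiting for the whole ring
 * to drain, the oldest request whose tail would leave at least n bytes free
 * (using the same wrap-aware subtraction as ring_space()) is picked, its
 * seqno is waited upon, and the space it occupied is reclaimed through
 * last_retired_head.
 */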
1500
1501 static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1502 {
1503         struct drm_device *dev = ring->dev;
1504         struct drm_i915_private *dev_priv = dev->dev_private;
1505         unsigned long end;
1506         int ret;
1507
1508         ret = intel_ring_wait_request(ring, n);
1509         if (ret != -ENOSPC)
1510                 return ret;
1511
1512         /* force the tail write in case we have been skipping them */
1513         __intel_ring_advance(ring);
1514
1515         trace_i915_ring_wait_begin(ring);
1516         /* With GEM the hangcheck timer should kick us out of the loop;
1517          * leaving it early runs the risk of corrupting GEM state (due
1518          * to running on almost untested codepaths). But on resume
1519          * timers don't work yet, so prevent a complete hang in that
1520          * case by choosing an insanely large timeout. */
1521         end = jiffies + 60 * HZ;
1522
1523         do {
1524                 ring->head = I915_READ_HEAD(ring);
1525                 ring->space = ring_space(ring);
1526                 if (ring->space >= n) {
1527                         trace_i915_ring_wait_end(ring);
1528                         return 0;
1529                 }
1530
1531                 if (dev->primary->master) {
1532                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1533                         if (master_priv->sarea_priv)
1534                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1535                 }
1536
1537                 msleep(1);
1538
1539                 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1540                                            dev_priv->mm.interruptible);
1541                 if (ret)
1542                         return ret;
1543         } while (!time_after(jiffies, end));
1544         trace_i915_ring_wait_end(ring);
1545         return -EBUSY;
1546 }
1547
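/*
 * Commands never wrap mid-stream: pad the remainder of the ring with
 * MI_NOOPs (waiting for that much space to drain first) and restart
 * emission at offset 0.
 */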
1548 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1549 {
1550         uint32_t __iomem *virt;
1551         int rem = ring->size - ring->tail;
1552
1553         if (ring->space < rem) {
1554                 int ret = ring_wait_for_space(ring, rem);
1555                 if (ret)
1556                         return ret;
1557         }
1558
1559         virt = ring->virtual_start + ring->tail;
1560         rem /= 4;
1561         while (rem--)
1562                 iowrite32(MI_NOOP, virt++);
1563
1564         ring->tail = 0;
1565         ring->space = ring_space(ring);
1566
1567         return 0;
1568 }
1569
1570 int intel_ring_idle(struct intel_ring_buffer *ring)
1571 {
1572         u32 seqno;
1573         int ret;
1574
1575         /* We need to add any requests required to flush the objects and ring */
1576         if (ring->outstanding_lazy_seqno) {
1577                 ret = i915_add_request(ring, NULL);
1578                 if (ret)
1579                         return ret;
1580         }
1581
1582         /* Wait upon the last request to be completed */
1583         if (list_empty(&ring->request_list))
1584                 return 0;
1585
1586         seqno = list_entry(ring->request_list.prev,
1587                            struct drm_i915_gem_request,
1588                            list)->seqno;
1589
1590         return i915_wait_seqno(ring, seqno);
1591 }
1592
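/*
 * Preallocate the lazy request and reserve its seqno before any
 * commands are emitted, so that adding the request later cannot fail
 * with the ring already written.
 */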
1593 static int
1594 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1595 {
1596         if (ring->outstanding_lazy_seqno)
1597                 return 0;
1598
1599         if (ring->preallocated_lazy_request == NULL) {
1600                 struct drm_i915_gem_request *request;
1601
1602                 request = kmalloc(sizeof(*request), GFP_KERNEL);
1603                 if (request == NULL)
1604                         return -ENOMEM;
1605
1606                 ring->preallocated_lazy_request = request;
1607         }
1608
1609         return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1610 }
1611
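/*
 * Make sure @bytes of contiguous ring space are available: wrap to the
 * start of the ring if the write would straddle the end, then wait for
 * enough space to be freed.
 */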
1612 static int __intel_ring_prepare(struct intel_ring_buffer *ring,
1613                                 int bytes)
1614 {
1615         int ret;
1616
1617         if (unlikely(ring->tail + bytes > ring->effective_size)) {
1618                 ret = intel_wrap_ring_buffer(ring);
1619                 if (unlikely(ret))
1620                         return ret;
1621         }
1622
1623         if (unlikely(ring->space < bytes)) {
1624                 ret = ring_wait_for_space(ring, bytes);
1625                 if (unlikely(ret))
1626                         return ret;
1627         }
1628
1629         return 0;
1630 }
1631
1632 int intel_ring_begin(struct intel_ring_buffer *ring,
1633                      int num_dwords)
1634 {
1635         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1636         int ret;
1637
1638         ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1639                                    dev_priv->mm.interruptible);
1640         if (ret)
1641                 return ret;
1642
1643         ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
1644         if (ret)
1645                 return ret;
1646
1647         /* Preallocate the olr before touching the ring */
1648         ret = intel_ring_alloc_seqno(ring);
1649         if (ret)
1650                 return ret;
1651
1652         ring->space -= num_dwords * sizeof(uint32_t);
1653         return 0;
1654 }
1655
1656 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1657 {
1658         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1659
1660         BUG_ON(ring->outstanding_lazy_seqno);
1661
1662         if (INTEL_INFO(ring->dev)->gen >= 6) {
1663                 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1664                 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1665                 if (HAS_VEBOX(ring->dev))
1666                         I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1667         }
1668
1669         ring->set_seqno(ring, seqno);
1670         ring->hangcheck.seqno = seqno;
1671 }
1672
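/*
 * On gen6 the BSD ring must be brought out of its power-saving (rc6)
 * state before the tail pointer is moved; the required register
 * sequence is spelled out step by step below.
 */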
1673 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1674                                      u32 value)
1675 {
1676         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1677
1678         /* Every tail move must follow the sequence below */
1679
1680         /* Disable notification that the ring is IDLE. The GT
1681          * will then assume that it is busy and bring it out of rc6.
1682          */
1683         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1684                    _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1685
1686         /* Clear the context id. Here be magic! */
1687         I915_WRITE64(GEN6_BSD_RNCID, 0x0);
1688
1689         /* Wait for the ring not to be idle, i.e. for it to wake up. */
1690         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1691                       GEN6_BSD_SLEEP_INDICATOR) == 0,
1692                      50))
1693                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1694
1695         /* Now that the ring is fully powered up, update the tail */
1696         I915_WRITE_TAIL(ring, value);
1697         POSTING_READ(RING_TAIL(ring->mmio_base));
1698
1699         /* Let the ring send IDLE messages to the GT again,
1700          * and so let it sleep to conserve power when idle.
1701          */
1702         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1703                    _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1704 }
1705
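/*
 * Emit an MI_FLUSH_DW on the video ring, optionally invalidating the
 * TLBs.  On gen8 the command length is bumped by one dword to carry
 * the upper half of the post-sync address.
 */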
1706 static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1707                                u32 invalidate, u32 flush)
1708 {
1709         uint32_t cmd;
1710         int ret;
1711
1712         ret = intel_ring_begin(ring, 4);
1713         if (ret)
1714                 return ret;
1715
1716         cmd = MI_FLUSH_DW;
1717         if (INTEL_INFO(ring->dev)->gen >= 8)
1718                 cmd += 1;
1719         /*
1720          * Bspec vol 1c.5 - video engine command streamer:
1721          * "If ENABLED, all TLBs will be invalidated once the flush
1722          * operation is complete. This bit is only valid when the
1723          * Post-Sync Operation field is a value of 1h or 3h."
1724          */
1725         if (invalidate & I915_GEM_GPU_DOMAINS)
1726                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1727                         MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1728         intel_ring_emit(ring, cmd);
1729         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1730         if (INTEL_INFO(ring->dev)->gen >= 8) {
1731                 intel_ring_emit(ring, 0); /* upper addr */
1732                 intel_ring_emit(ring, 0); /* value */
1733         } else  {
1734                 intel_ring_emit(ring, 0);
1735                 intel_ring_emit(ring, MI_NOOP);
1736         }
1737         intel_ring_advance(ring);
1738         return 0;
1739 }
1740
1741 static int
1742 gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1743                               u32 offset, u32 len,
1744                               unsigned flags)
1745 {
1746         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1747         bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1748                 !(flags & I915_DISPATCH_SECURE);
1749         int ret;
1750
1751         ret = intel_ring_begin(ring, 4);
1752         if (ret)
1753                 return ret;
1754
1755         /* FIXME(BDW): Address space and security selectors. */
1756         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1757         intel_ring_emit(ring, offset);
1758         intel_ring_emit(ring, 0);
1759         intel_ring_emit(ring, MI_NOOP);
1760         intel_ring_advance(ring);
1761
1762         return 0;
1763 }
1764
1765 static int
1766 hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1767                               u32 offset, u32 len,
1768                               unsigned flags)
1769 {
1770         int ret;
1771
1772         ret = intel_ring_begin(ring, 2);
1773         if (ret)
1774                 return ret;
1775
1776         intel_ring_emit(ring,
1777                         MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
1778                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
1779         /* bits 0-7 carry the length on GEN6+ */
1780         intel_ring_emit(ring, offset);
1781         intel_ring_advance(ring);
1782
1783         return 0;
1784 }
1785
1786 static int
1787 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1788                               u32 offset, u32 len,
1789                               unsigned flags)
1790 {
1791         int ret;
1792
1793         ret = intel_ring_begin(ring, 2);
1794         if (ret)
1795                 return ret;
1796
1797         intel_ring_emit(ring,
1798                         MI_BATCH_BUFFER_START |
1799                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1800         /* bits 0-7 carry the length on GEN6+ */
1801         intel_ring_emit(ring, offset);
1802         intel_ring_advance(ring);
1803
1804         return 0;
1805 }
1806
1807 /* Blitter support (SandyBridge+) */
1808
1809 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1810                            u32 invalidate, u32 flush)
1811 {
1812         struct drm_device *dev = ring->dev;
1813         uint32_t cmd;
1814         int ret;
1815
1816         ret = intel_ring_begin(ring, 4);
1817         if (ret)
1818                 return ret;
1819
1820         cmd = MI_FLUSH_DW;
1821         if (INTEL_INFO(ring->dev)->gen >= 8)
1822                 cmd += 1;
1823         /*
1824          * Bspec vol 1c.3 - blitter engine command streamer:
1825          * "If ENABLED, all TLBs will be invalidated once the flush
1826          * operation is complete. This bit is only valid when the
1827          * Post-Sync Operation field is a value of 1h or 3h."
1828          */
1829         if (invalidate & I915_GEM_DOMAIN_RENDER)
1830                 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
1831                         MI_FLUSH_DW_OP_STOREDW;
1832         intel_ring_emit(ring, cmd);
1833         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1834         if (INTEL_INFO(ring->dev)->gen >= 8) {
1835                 intel_ring_emit(ring, 0); /* upper addr */
1836                 intel_ring_emit(ring, 0); /* value */
1837         } else  {
1838                 intel_ring_emit(ring, 0);
1839                 intel_ring_emit(ring, MI_NOOP);
1840         }
1841         intel_ring_advance(ring);
1842
1843         if (IS_GEN7(dev) && !invalidate && flush)
1844                 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
1845
1846         return 0;
1847 }
1848
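/*
 * Set up the render ring: pick the add_request/flush/irq/dispatch
 * implementations appropriate to the hardware generation, allocate the
 * CS TLB workaround batch where needed, and hand off to the common
 * ring initialisation.
 */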
1849 int intel_init_render_ring_buffer(struct drm_device *dev)
1850 {
1851         drm_i915_private_t *dev_priv = dev->dev_private;
1852         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1853
1854         ring->name = "render ring";
1855         ring->id = RCS;
1856         ring->mmio_base = RENDER_RING_BASE;
1857
1858         if (INTEL_INFO(dev)->gen >= 6) {
1859                 ring->add_request = gen6_add_request;
1860                 ring->flush = gen7_render_ring_flush;
1861                 if (INTEL_INFO(dev)->gen == 6)
1862                         ring->flush = gen6_render_ring_flush;
1863                 if (INTEL_INFO(dev)->gen >= 8) {
1864                         ring->flush = gen8_render_ring_flush;
1865                         ring->irq_get = gen8_ring_get_irq;
1866                         ring->irq_put = gen8_ring_put_irq;
1867                 } else {
1868                         ring->irq_get = gen6_ring_get_irq;
1869                         ring->irq_put = gen6_ring_put_irq;
1870                 }
1871                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1872                 ring->get_seqno = gen6_ring_get_seqno;
1873                 ring->set_seqno = ring_set_seqno;
1874                 ring->sync_to = gen6_ring_sync;
1875                 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1876                 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
1877                 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
1878                 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
1879                 ring->signal_mbox[RCS] = GEN6_NOSYNC;
1880                 ring->signal_mbox[VCS] = GEN6_VRSYNC;
1881                 ring->signal_mbox[BCS] = GEN6_BRSYNC;
1882                 ring->signal_mbox[VECS] = GEN6_VERSYNC;
1883         } else if (IS_GEN5(dev)) {
1884                 ring->add_request = pc_render_add_request;
1885                 ring->flush = gen4_render_ring_flush;
1886                 ring->get_seqno = pc_render_get_seqno;
1887                 ring->set_seqno = pc_render_set_seqno;
1888                 ring->irq_get = gen5_ring_get_irq;
1889                 ring->irq_put = gen5_ring_put_irq;
1890                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
1891                                         GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
1892         } else {
1893                 ring->add_request = i9xx_add_request;
1894                 if (INTEL_INFO(dev)->gen < 4)
1895                         ring->flush = gen2_render_ring_flush;
1896                 else
1897                         ring->flush = gen4_render_ring_flush;
1898                 ring->get_seqno = ring_get_seqno;
1899                 ring->set_seqno = ring_set_seqno;
1900                 if (IS_GEN2(dev)) {
1901                         ring->irq_get = i8xx_ring_get_irq;
1902                         ring->irq_put = i8xx_ring_put_irq;
1903                 } else {
1904                         ring->irq_get = i9xx_ring_get_irq;
1905                         ring->irq_put = i9xx_ring_put_irq;
1906                 }
1907                 ring->irq_enable_mask = I915_USER_INTERRUPT;
1908         }
1909         ring->write_tail = ring_write_tail;
1910         if (IS_HASWELL(dev))
1911                 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1912         else if (IS_GEN8(dev))
1913                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
1914         else if (INTEL_INFO(dev)->gen >= 6)
1915                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1916         else if (INTEL_INFO(dev)->gen >= 4)
1917                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1918         else if (IS_I830(dev) || IS_845G(dev))
1919                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1920         else
1921                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1922         ring->init = init_render_ring;
1923         ring->cleanup = render_ring_cleanup;
1924
1925         /* Workaround batchbuffer to combat CS tlb bug. */
1926         if (HAS_BROKEN_CS_TLB(dev)) {
1927                 struct drm_i915_gem_object *obj;
1928                 int ret;
1929
1930                 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
1931                 if (obj == NULL) {
1932                         DRM_ERROR("Failed to allocate batch bo\n");
1933                         return -ENOMEM;
1934                 }
1935
1936                 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
1937                 if (ret != 0) {
1938                         drm_gem_object_unreference(&obj->base);
1939                         DRM_ERROR("Failed to pin batch bo\n");
1940                         return ret;
1941                 }
1942
1943                 ring->scratch.obj = obj;
1944                 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
1945         }
1946
1947         return intel_init_ring_buffer(dev, ring);
1948 }
1949
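/*
 * Legacy (non-KMS/DRI1) render ring setup: the ring backing store is
 * supplied by the caller rather than allocated as a GEM object, so it
 * is mapped directly here.  Gen6+ is not supported without KMS.
 */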
1950 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1951 {
1952         drm_i915_private_t *dev_priv = dev->dev_private;
1953         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1954         int ret;
1955
1956         ring->name = "render ring";
1957         ring->id = RCS;
1958         ring->mmio_base = RENDER_RING_BASE;
1959
1960         if (INTEL_INFO(dev)->gen >= 6) {
1961                 /* non-kms not supported on gen6+ */
1962                 return -ENODEV;
1963         }
1964
1965         /* Note: gem is not supported on gen5/ilk without kms (the corresponding
1966          * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
1967          * the special gen5 functions. */
1968         ring->add_request = i9xx_add_request;
1969         if (INTEL_INFO(dev)->gen < 4)
1970                 ring->flush = gen2_render_ring_flush;
1971         else
1972                 ring->flush = gen4_render_ring_flush;
1973         ring->get_seqno = ring_get_seqno;
1974         ring->set_seqno = ring_set_seqno;
1975         if (IS_GEN2(dev)) {
1976                 ring->irq_get = i8xx_ring_get_irq;
1977                 ring->irq_put = i8xx_ring_put_irq;
1978         } else {
1979                 ring->irq_get = i9xx_ring_get_irq;
1980                 ring->irq_put = i9xx_ring_put_irq;
1981         }
1982         ring->irq_enable_mask = I915_USER_INTERRUPT;
1983         ring->write_tail = ring_write_tail;
1984         if (INTEL_INFO(dev)->gen >= 4)
1985                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1986         else if (IS_I830(dev) || IS_845G(dev))
1987                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1988         else
1989                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1990         ring->init = init_render_ring;
1991         ring->cleanup = render_ring_cleanup;
1992
1993         ring->dev = dev;
1994         INIT_LIST_HEAD(&ring->active_list);
1995         INIT_LIST_HEAD(&ring->request_list);
1996
1997         ring->size = size;
1998         ring->effective_size = ring->size;
1999         if (IS_I830(ring->dev) || IS_845G(ring->dev))
2000                 ring->effective_size -= 128;
2001
2002         ring->virtual_start = ioremap_wc(start, size);
2003         if (ring->virtual_start == NULL) {
2004                 DRM_ERROR("cannot ioremap virtual address for"
2005                           " ring buffer\n");
2006                 return -ENOMEM;
2007         }
2008
2009         if (!I915_NEED_GFX_HWS(dev)) {
2010                 ret = init_phys_status_page(ring);
2011                 if (ret)
2012                         return ret;
2013         }
2014
2015         return 0;
2016 }
2017
2018 int intel_init_bsd_ring_buffer(struct drm_device *dev)
2019 {
2020         drm_i915_private_t *dev_priv = dev->dev_private;
2021         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
2022
2023         ring->name = "bsd ring";
2024         ring->id = VCS;
2025
2026         ring->write_tail = ring_write_tail;
2027         if (INTEL_INFO(dev)->gen >= 6) {
2028                 ring->mmio_base = GEN6_BSD_RING_BASE;
2029                 /* gen6 bsd needs a special wa for tail updates */
2030                 if (IS_GEN6(dev))
2031                         ring->write_tail = gen6_bsd_ring_write_tail;
2032                 ring->flush = gen6_bsd_ring_flush;
2033                 ring->add_request = gen6_add_request;
2034                 ring->get_seqno = gen6_ring_get_seqno;
2035                 ring->set_seqno = ring_set_seqno;
2036                 if (INTEL_INFO(dev)->gen >= 8) {
2037                         ring->irq_enable_mask =
2038                                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2039                         ring->irq_get = gen8_ring_get_irq;
2040                         ring->irq_put = gen8_ring_put_irq;
2041                         ring->dispatch_execbuffer =
2042                                 gen8_ring_dispatch_execbuffer;
2043                 } else {
2044                         ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2045                         ring->irq_get = gen6_ring_get_irq;
2046                         ring->irq_put = gen6_ring_put_irq;
2047                         ring->dispatch_execbuffer =
2048                                 gen6_ring_dispatch_execbuffer;
2049                 }
2050                 ring->sync_to = gen6_ring_sync;
2051                 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
2052                 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2053                 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
2054                 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
2055                 ring->signal_mbox[RCS] = GEN6_RVSYNC;
2056                 ring->signal_mbox[VCS] = GEN6_NOSYNC;
2057                 ring->signal_mbox[BCS] = GEN6_BVSYNC;
2058                 ring->signal_mbox[VECS] = GEN6_VEVSYNC;
2059         } else {
2060                 ring->mmio_base = BSD_RING_BASE;
2061                 ring->flush = bsd_ring_flush;
2062                 ring->add_request = i9xx_add_request;
2063                 ring->get_seqno = ring_get_seqno;
2064                 ring->set_seqno = ring_set_seqno;
2065                 if (IS_GEN5(dev)) {
2066                         ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2067                         ring->irq_get = gen5_ring_get_irq;
2068                         ring->irq_put = gen5_ring_put_irq;
2069                 } else {
2070                         ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2071                         ring->irq_get = i9xx_ring_get_irq;
2072                         ring->irq_put = i9xx_ring_put_irq;
2073                 }
2074                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2075         }
2076         ring->init = init_ring_common;
2077
2078         return intel_init_ring_buffer(dev, ring);
2079 }
2080
2081 int intel_init_blt_ring_buffer(struct drm_device *dev)
2082 {
2083         drm_i915_private_t *dev_priv = dev->dev_private;
2084         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
2085
2086         ring->name = "blitter ring";
2087         ring->id = BCS;
2088
2089         ring->mmio_base = BLT_RING_BASE;
2090         ring->write_tail = ring_write_tail;
2091         ring->flush = gen6_ring_flush;
2092         ring->add_request = gen6_add_request;
2093         ring->get_seqno = gen6_ring_get_seqno;
2094         ring->set_seqno = ring_set_seqno;
2095         if (INTEL_INFO(dev)->gen >= 8) {
2096                 ring->irq_enable_mask =
2097                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2098                 ring->irq_get = gen8_ring_get_irq;
2099                 ring->irq_put = gen8_ring_put_irq;
2100                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2101         } else {
2102                 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2103                 ring->irq_get = gen6_ring_get_irq;
2104                 ring->irq_put = gen6_ring_put_irq;
2105                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2106         }
2107         ring->sync_to = gen6_ring_sync;
2108         ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
2109         ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
2110         ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2111         ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
2112         ring->signal_mbox[RCS] = GEN6_RBSYNC;
2113         ring->signal_mbox[VCS] = GEN6_VBSYNC;
2114         ring->signal_mbox[BCS] = GEN6_NOSYNC;
2115         ring->signal_mbox[VECS] = GEN6_VEBSYNC;
2116         ring->init = init_ring_common;
2117
2118         return intel_init_ring_buffer(dev, ring);
2119 }
2120
2121 int intel_init_vebox_ring_buffer(struct drm_device *dev)
2122 {
2123         drm_i915_private_t *dev_priv = dev->dev_private;
2124         struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
2125
2126         ring->name = "video enhancement ring";
2127         ring->id = VECS;
2128
2129         ring->mmio_base = VEBOX_RING_BASE;
2130         ring->write_tail = ring_write_tail;
2131         ring->flush = gen6_ring_flush;
2132         ring->add_request = gen6_add_request;
2133         ring->get_seqno = gen6_ring_get_seqno;
2134         ring->set_seqno = ring_set_seqno;
2135
2136         if (INTEL_INFO(dev)->gen >= 8) {
2137                 ring->irq_enable_mask =
2138                         GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2139                 ring->irq_get = gen8_ring_get_irq;
2140                 ring->irq_put = gen8_ring_put_irq;
2141                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2142         } else {
2143                 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2144                 ring->irq_get = hsw_vebox_get_irq;
2145                 ring->irq_put = hsw_vebox_put_irq;
2146                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2147         }
2148         ring->sync_to = gen6_ring_sync;
2149         ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
2150         ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
2151         ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
2152         ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2153         ring->signal_mbox[RCS] = GEN6_RVESYNC;
2154         ring->signal_mbox[VCS] = GEN6_VVESYNC;
2155         ring->signal_mbox[BCS] = GEN6_BVESYNC;
2156         ring->signal_mbox[VECS] = GEN6_NOSYNC;
2157         ring->init = init_ring_common;
2158
2159         return intel_init_ring_buffer(dev, ring);
2160 }
2161
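/*
 * Flush the GPU write domains if any rendering has left them dirty,
 * without invalidating the read caches.
 */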
2162 int
2163 intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
2164 {
2165         int ret;
2166
2167         if (!ring->gpu_caches_dirty)
2168                 return 0;
2169
2170         ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
2171         if (ret)
2172                 return ret;
2173
2174         trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
2175
2176         ring->gpu_caches_dirty = false;
2177         return 0;
2178 }
2179
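/*
 * Invalidate the GPU read caches ahead of a new batch, also flushing
 * the write domains if they are still dirty.
 */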
2180 int
2181 intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
2182 {
2183         uint32_t flush_domains;
2184         int ret;
2185
2186         flush_domains = 0;
2187         if (ring->gpu_caches_dirty)
2188                 flush_domains = I915_GEM_GPU_DOMAINS;
2189
2190         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2191         if (ret)
2192                 return ret;
2193
2194         trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2195
2196         ring->gpu_caches_dirty = false;
2197         return 0;
2198 }