#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	u32	*page_addr;
	unsigned int	gfx_addr;
	struct	drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

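/*
 * Minimal usage sketch: the accessors above parametrize the register
 * offset by ring->mmio_base, so one code path can drive any engine.
 * Assuming a dev_priv in scope, as the I915_READ()/I915_WRITE()
 * implementations require, a plausible ring quiesce sequence is:
 *
 *	I915_WRITE_CTL(ring, 0);
 *	I915_WRITE_HEAD(ring, 0);
 *	ring->write_tail(ring, 0);
 */
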
struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
	} id;
#define I915_NUM_RINGS 3
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;

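	/*
	 * Sketch of the intended consumption pattern, assuming a
	 * ring_space() wrap helper like the one sketched after this
	 * struct: a waiter may fast-forward its cached head to the last
	 * retired position before recomputing free space:
	 *
	 *	if (ring->last_retired_head != -1) {
	 *		ring->head = ring->last_retired_head;
	 *		ring->last_retired_head = -1;
	 *		ring->space = ring_space(ring);
	 *	}
	 */
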
	u32		irq_refcount;		/* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
				       u32 *seqno);
	u32		(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length);
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	u32		semaphore_register[3]; /* our mbox written by others */
	u32		signal_mbox[2]; /* mboxes this ring signals to */
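
	/*
	 * Semaphore sketch (the waiter/signaller names are illustrative):
	 * to stall this ring until another engine passes a given seqno,
	 * callers are expected to go through sync_to() above rather than
	 * touch the mailbox registers directly:
	 *
	 *	if (signaller != waiter)
	 *		ret = waiter->sync_to(waiter, signaller, seqno);
	 */
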
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list; last_rendering_seqno can
	 * be used to differentiate between the two lists.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;

	wait_queue_head_t irq_queue;

	void *private;
};

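/*
 * Sketch of the head/tail arithmetic the fields above imply, modelled on
 * the driver's internal ring-space logic (the helper name is illustrative):
 * free space is the gap from tail forward to head, modulo the buffer size,
 * minus a small pad so the tail never catches the head exactly.
 *
 *	static inline int ring_space(struct intel_ring_buffer *ring)
 *	{
 *		int space = ring->head - (ring->tail + 8);
 *		if (space < 0)
 *			space += ring->size;
 *		return space;
 *	}
 */
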
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs
	 * bcs -> 0 = cs, 1 = vcs
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

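/*
 * Worked example, assuming the rings sit in a single array so that
 * pointer subtraction yields the ring index delta: for ring = vcs and
 * other = cs, idx = (0 - 1) - 1 = -2, which after adding I915_NUM_RINGS
 * becomes 1, matching "vcs -> 1 = cs" in the table above.
 */
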
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX	0x20

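/*
 * Sketch of how the reserved layout is consumed: given that the request
 * breadcrumb is written at I915_GEM_HWS_INDEX, a get_seqno()
 * implementation can reduce to a single status-page read (the function
 * name is illustrative):
 *
 *	static u32 ring_get_seqno(struct intel_ring_buffer *ring)
 *	{
 *		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *	}
 */
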
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
	return intel_wait_ring_buffer(ring, ring->size - 8);
}

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);

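/*
 * Typical emission sequence: reserve space for the whole command with
 * intel_ring_begin(), write it a dword at a time, then advance, which
 * publishes the new tail to the hardware. MI_NOOP (from i915_reg.h)
 * stands in for real command dwords:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */
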
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */