#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	u32 *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

struct intel_ring_buffer {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
	} id;
#define I915_NUM_RINGS 3
	u32 mmio_base;
	void __iomem *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	struct intel_hw_status_page status_page;

	/**
	 * We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;

	u32 irq_refcount;	/* protected by dev_priv->irq_lock */
	u32 irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32 trace_irq_seqno;
	u32 sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void (*irq_put)(struct intel_ring_buffer *ring);

	int (*init)(struct intel_ring_buffer *ring);

	void (*write_tail)(struct intel_ring_buffer *ring,
			   u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int (*add_request)(struct intel_ring_buffer *ring);
	/*
	 * Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_ring_buffer *ring,
			 bool lazy_coherency);
	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
				   u32 offset, u32 length,
				   unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_ring_buffer *ring);
	int (*sync_to)(struct intel_ring_buffer *ring,
		       struct intel_ring_buffer *to,
		       u32 seqno);

	u32 semaphore_register[3];	/* our mbox written by others */
	u32 signal_mbox[2];		/* mboxes this ring signals to */

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct drm_i915_gem_object *last_context_obj;

	void *private;
};
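/*
 * Illustrative sketch only (a hypothetical helper, not part of this
 * header's API): how the head/tail bookkeeping above yields the free
 * space tracked in ring->space. The I915_RING_FREE_SPACE reserve keeps
 * the tail from ever catching up to the head, which also avoids the
 * same-cacheline hazard quoted from the BSpec at the top of this file.
 * The in-tree accounting lives in intel_ringbuffer.c and additionally
 * masks the head value read back from hardware.
 */
static inline int intel_ring_space_sketch(struct intel_ring_buffer *ring)
{
	int space = ring->head - (ring->tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += ring->size;	/* head has wrapped behind tail */
	return space;
}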
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = rcs
	 * bcs -> 0 = rcs, 1 = vcs
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	cpu_ccfence();
	return ring->status_page.page_addr[reg];
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
void intel_ring_advance(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
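/*
 * Illustrative sketch only (a hypothetical helper, not part of this
 * header's API): commands reach the ring through the canonical
 * begin/emit/advance pattern. intel_ring_begin() reserves space for n
 * dwords (waiting for free space if necessary), intel_ring_emit()
 * writes one dword at the current tail, and intel_ring_advance()
 * publishes the new tail to the hardware. MI_NOOP is assumed to come
 * from i915_reg.h, as elsewhere in the driver; rings want an even
 * number of dwords so the tail stays qword aligned.
 */
static inline int intel_ring_emit_noops_sketch(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve two dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);	/* make the new tail visible to the hw */
	return 0;
}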
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_request == 0);
	return ring->outstanding_lazy_request;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */