#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#define I915_CMD_HASH_ORDER 9

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	u32 *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
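
/*
 * Illustrative sketch only, not part of the driver API (the helper name
 * here is hypothetical): the head/tail constraint quoted above is why
 * I915_RING_FREE_SPACE bytes are always held back when computing how much
 * of the (power-of-two sized) ring is usable, so head and tail never meet
 * on the same cacheline.
 */
static inline int __example_ring_space(u32 head, u32 tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += size;
	return space;
}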

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (__ring)->id))

#define GEN8_RING_SEMAPHORE_INIT do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while (0)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_ACTIVE_LOOP,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u64 max_acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
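
/*
 * Illustrative sketch only (the helper name is hypothetical, not part of
 * this header's API): consuming last_retired_head as described in the
 * comment above, so that ring space freed by retired requests becomes
 * available again.
 */
static inline void __example_consume_retired_head(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != (u32)-1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}
}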

struct intel_engine_cs {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32 mmio_base;
	struct drm_device *dev;
	struct intel_ringbuffer *buffer;

	struct intel_hw_status_page status_page;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	u32 trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void (*irq_put)(struct intel_engine_cs *ring);

	int (*init)(struct intel_engine_cs *ring);

	void (*write_tail)(struct intel_engine_cs *ring,
			   u32 value);
	int __must_check (*flush)(struct intel_engine_cs *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int (*add_request)(struct intel_engine_cs *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_engine_cs *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_engine_cs *ring,
			  u32 seqno);
	int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
				   u64 offset, u32 length,
				   unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *        signal to     signal to     signal to      signal to      signal to
	 *          RCS            VCS           BCS            VECS           VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *        sync from     sync from     sync from      sync from      sync from
	 *          RCS            VCS           BCS            VECS           VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		u32 sync_seqno[I915_NUM_RINGS-1];

		union {
			struct {
				/* our mbox written by others */
				u32 wait[I915_NUM_RINGS];
				/* mboxes this ring signals to */
				u32 signal[I915_NUM_RINGS];
			} mbox;
			u64 signal_ggtt[I915_NUM_RINGS];
		};

		/* AKA wait() */
		int (*sync_to)(struct intel_engine_cs *ring,
			       struct intel_engine_cs *to,
			       u32 seqno);
		int (*signal)(struct intel_engine_cs *signaller,
			      /* num_dwords needed by caller */
			      unsigned int num_dwords);
	} semaphore;
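
	/*
	 * Worked example of the tables above (illustrative only): VCS
	 * signalling VECS writes at f(VCS, VECS) = (1 * 5 * 8) + (8 * 3)
	 * = 0x40, which is exactly where VECS waits for VCS:
	 * g(VECS, VCS) = (1 * 5 * 8) + (8 * 3) = 0x40.
	 */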

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	return ring->buffer && ring->buffer->obj;
}

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
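
/*
 * Illustrative only: the canonical way to use the helpers above is to
 * reserve space, write dwords, then advance the tail (a sketch; MI_NOOP
 * here just stands in for real commands):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */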
void __intel_ring_advance(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */