#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to the origin of the magic values used in the
 * various workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
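/*
 * Illustrative sketch (not part of the driver API; the function name is
 * hypothetical): one way to honour the rule above is to take the
 * tail-to-head gap modulo the ring size and then subtract
 * I915_RING_FREE_SPACE of slack, which is what the __intel_ring_space()
 * helper declared later in this header plausibly computes.
 */
static inline int example_ring_space(int head, int tail, int size)
{
        /* Distance from tail to head, wrapping around the ring. */
        int space = head - tail;

        if (space <= 0)
                space += size;

        /* Keep a cacheline of slack so head never passes tail. */
        return space - I915_RING_FREE_SPACE;
}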
struct intel_hw_status_page {
        u32 *page_addr;
        unsigned int gfx_addr;
        struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* The seqno size is actually only a uint32, but since we plan to use
 * MI_FLUSH_DW to do the writes, and that command must have qword-aligned
 * offsets, we simply pretend it is 8 bytes.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)

#define GEN8_SIGNAL_OFFSET(__ring, to) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
         ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
         (i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
         ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
         (i915_semaphore_seqno_size * (__ring)->id))

#define GEN8_RING_SEMAPHORE_INIT do { \
        if (!dev_priv->semaphore_obj) { \
                break; \
        } \
        ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
        ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
        ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
        ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
        ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
        ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
        } while (0)
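/*
 * Worked example (illustrative, derived from the definitions above): with
 * I915_NUM_RINGS = 5 and i915_semaphore_seqno_size = 8, the slot the VCS
 * ring (id 1) uses to signal the BCS ring (id 2) sits at byte offset
 *
 *      1 * 5 * 8 + 8 * 2 = 0x38
 *
 * from the start of semaphore_obj, which matches the VCS-signals-BCS
 * entry (0x38) in the GEN8 signal table documented inside
 * struct intel_engine_cs below.
 */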
enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
        HANGCHECK_ACTIVE_LOOP,
        HANGCHECK_KICK,
        HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
        u64 acthd;
        u64 max_acthd;
        u32 seqno;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
};

struct intel_ringbuffer {
        struct drm_i915_gem_object *obj;
        char __iomem *virtual_start;

        struct intel_engine_cs *ring;
        struct list_head link;

        unsigned int virtual_count;
        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;
        int reserved_size;
        int reserved_tail;
        bool reserved_in_use;

        /**
         * We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};
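/*
 * Illustrative sketch (a hypothetical body for the intel_ring_update_space()
 * helper declared later in this header), showing how last_retired_head is
 * consumed exactly as described above:
 *
 *      if (ringbuf->last_retired_head != -1) {
 *              ringbuf->head = ringbuf->last_retired_head;
 *              ringbuf->last_retired_head = -1;
 *      }
 *      ringbuf->space = __intel_ring_space(ringbuf->head, ringbuf->tail,
 *                                          ringbuf->size);
 */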
struct intel_context;
struct drm_i915_reg_descriptor;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, which is also helpful
 *  in case we want to have multiple batches at different offsets based
 *  on some criteria. It is not a requirement at the moment but provides
 *  an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
        struct i915_wa_ctx_bb {
                u32 offset;
                u32 size;
        } indirect_ctx, per_ctx;
        struct drm_i915_gem_object *obj;
};

struct intel_engine_cs {
        const char *name;
        enum intel_ring_id {
                RCS = 0x0,
                VCS,
                BCS,
                VECS,
                VCS2
        } id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
        u32 mmio_base;
        struct drm_device *dev;
        struct intel_ringbuffer *buffer;
        struct list_head buffers;

        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
         * modifying the batch contents after software parsing.
         */
        struct i915_gem_batch_pool batch_pool;

        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;

        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32 irq_enable_mask; /* bitmask to enable ring interrupt */
        struct drm_i915_gem_request *trace_irq_req;
        bool __must_check (*irq_get)(struct intel_engine_cs *ring);
        void (*irq_put)(struct intel_engine_cs *ring);

        int (*init_hw)(struct intel_engine_cs *ring);

        int (*init_context)(struct drm_i915_gem_request *req);

        void (*write_tail)(struct intel_engine_cs *ring,
                           u32 value);
        int __must_check (*flush)(struct drm_i915_gem_request *req,
                                  u32 invalidate_domains,
                                  u32 flush_domains);
        int (*add_request)(struct drm_i915_gem_request *req);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        u32 (*get_seqno)(struct intel_engine_cs *ring,
                         bool lazy_coherency);
        void (*set_seqno)(struct intel_engine_cs *ring,
                          u32 seqno);
        int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                   u64 offset, u32 length,
                                   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
        void (*cleanup)(struct intel_engine_cs *ring);

        /* GEN8 signal/wait table - never trust comments!
         *        signal to     signal to    signal to    signal to    signal to
         *          RCS            VCS          BCS          VECS         VCS2
         *      -------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  ie. transpose of g(x, y)
         *
         *       sync from      sync from    sync from    sync from   sync from
         *          RCS            VCS          BCS          VECS         VCS2
         *      -------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  ie. transpose of f(x, y)
         */
        struct {
                u32 sync_seqno[I915_NUM_RINGS-1];

                union {
                        struct {
                                /* our mbox written by others */
                                u32 wait[I915_NUM_RINGS];
                                /* mboxes this ring signals to */
                                i915_reg_t signal[I915_NUM_RINGS];
                        } mbox;
                        u64 signal_ggtt[I915_NUM_RINGS];
                };

                /* AKA wait() */
                int (*sync_to)(struct drm_i915_gem_request *to_req,
                               struct intel_engine_cs *from,
                               u32 seqno);
                int (*signal)(struct drm_i915_gem_request *signaller_req,
                              /* num_dwords needed by caller */
                              unsigned int num_dwords);
        } semaphore;

        /* Execlists */
        struct lock execlist_lock;
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int (*emit_request)(struct drm_i915_gem_request *request);
        int (*emit_flush)(struct drm_i915_gem_request *request,
                          u32 invalidate_domains,
                          u32 flush_domains);
        int (*emit_bb_start)(struct drm_i915_gem_request *req,
                             u64 offset, unsigned dispatch_flags);

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_read_req
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * Seqno of the request most recently submitted to request_list.
         * Used exclusively by the hang checker to avoid grabbing the lock
         * while inspecting the request list.
         */
        u32 last_submitted_seqno;

        bool gpu_caches_dirty;

        wait_queue_head_t irq_queue;

        struct intel_context *default_context;
        struct intel_context *last_context;

        struct intel_ring_hangcheck hangcheck;

        struct {
                struct drm_i915_gem_object *obj;
                u32 gtt_offset;
                volatile u32 *cpu_page;
        } scratch;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this ring.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const struct drm_i915_reg_descriptor *reg_table;
        int reg_count;

        /*
         * Table of registers allowed in commands that read/write registers,
         * but only from the DRM master.
         */
        const struct drm_i915_reg_descriptor *master_reg_table;
        int master_reg_count;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Returns 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the ring's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-ring length
         * field encoding for the command (i.e. certain opcode ranges use
         * certain bits to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
};
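/*
 * Illustrative sketch of a get_cmd_length_mask() hook (hypothetical; the
 * real per-ring implementations live in the command parser). MI commands
 * keep their length in the low bits of the header, so a hook might look
 * like this, returning 0 for headers it cannot parse:
 */
static inline u32 example_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = cmd_header >> 29;  /* bits 31:29 select the client */

        if (client == 0)                /* MI client */
                return 0x3f;            /* length lives in bits 5:0 */

        return 0;                       /* unrecognized: reject the batch */
}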
static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
        return ring->dev != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
        return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
                      struct intel_engine_cs *other)
{
        int idx;

        /*
         * rcs  -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
         * vcs  -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
         * bcs  -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
         * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
         * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
         */

        idx = (other - ring) - 1;
        if (idx < 0)
                idx += I915_NUM_RINGS;

        return idx;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
{
        drm_clflush_virt_range(&ring->status_page.page_addr[reg],
                               sizeof(uint32_t));
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
                       int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        barrier();
        return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
                        int reg, u32 value)
{
        ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX              0x30
#define I915_GEM_HWS_SCRATCH_INDEX      0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
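/*
 * Illustrative sketch (hypothetical helper, not part of this header's API):
 * a lazily-coherent seqno read can simply load the dword the GPU writes at
 * I915_GEM_HWS_INDEX as requests complete:
 */
static inline u32 example_hws_seqno(struct intel_engine_cs *ring)
{
        /* The GPU updates this dword from the command stream. */
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}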
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_engine_cs *ring,
                                   u32 data)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;

        iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
        ringbuf->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
                                       i915_reg_t reg)
{
        intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;

        ringbuf->tail &= ringbuf->size - 1;
}
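/*
 * Illustrative usage (a minimal sketch; the request plumbing is assumed to
 * come from the caller): reserve ring space with intel_ring_begin(), pair
 * every reserved dword with an intel_ring_emit(), then let
 * intel_ring_advance() wrap the tail:
 */
static inline int example_emit_two_noops(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *ring = req->ring; /* field assumed */
        int ret;

        ret = intel_ring_begin(req, 2); /* make room for two dwords */
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}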
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
        return ringbuf->tail;
}

/*
 * Arbitrary size for the largest possible 'add request' sequence. The code
 * paths are complex and variable. Empirical measurement shows that the worst
 * case is ILK at 136 words. Reserving too much is better than reserving too
 * little as that allows for corner cases that might have been missed. So the
 * figure has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST 160

/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);

/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);

#endif /* _INTEL_RINGBUFFER_H_ */