#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
        u32 *page_addr;
        unsigned int gfx_addr;
        struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
        (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
         GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
         GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

#define GEN8_RING_SEMAPHORE_INIT(e) do { \
        if (!dev_priv->semaphore_obj) { \
                break; \
        } \
        (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
        (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
        (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
        (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
        (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
        (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
        } while (0)
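
/*
 * Illustrative arithmetic only (not used by the driver): each (from, to)
 * pair owns one qword slot in the semaphore object, so with
 * I915_NUM_ENGINES == 5 and 8-byte slots, the seqno that engine id 1
 * signals to engine id 3 lives at byte offset (1 * 5 + 3) * 8 = 0x40
 * into semaphore_obj.
 */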

enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
        HANGCHECK_KICK,
        HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
        u64 acthd;
        u32 seqno;
        unsigned user_interrupts;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
        u32 instdone[I915_NUM_INSTDONE_REG];
};

struct intel_ringbuffer {
        struct drm_i915_gem_object *obj;
        char __iomem *virtual_start;
        struct i915_vma *vma;

        struct intel_engine_cs *engine;
        struct list_head link;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;
        int reserved_size;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};
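
/*
 * A sketch of the free-space computation declared below as
 * __intel_ring_space() (the definition lives in intel_ringbuffer.c):
 * space is measured from tail forwards to head, modulo the ring size,
 * less I915_RING_FREE_SPACE to honour the head/tail cacheline
 * restriction quoted at the top of this file:
 *
 *      int space = head - tail;
 *      if (space <= 0)
 *              space += size;
 *      return space - I915_RING_FREE_SPACE;
 *
 * Retiring a request records its position in last_retired_head, allowing
 * head to be advanced to that point before the space is recomputed.
 */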

struct intel_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 * offset: specifies the batch starting position, which is also helpful
 * if we want to have multiple batches at different offsets based on
 * some criteria. It is not a requirement at the moment but provides
 * an option for future use.
 * size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
        struct i915_wa_ctx_bb {
                u32 offset;
                u32 size;
        } indirect_ctx, per_ctx;
        struct drm_i915_gem_object *obj;
};

struct intel_engine_cs {
        const char *name;
        enum intel_engine_id {
                RCS = 0,
                BCS,
                VCS,
                VCS2,   /* Keep instances of the same type engine together. */
                VECS
        } id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
        unsigned int exec_id;
        unsigned int hw_id;
        unsigned int guc_id; /* XXX same as hw_id? */
        u32 mmio_base;
        struct drm_device *dev;
        struct intel_ringbuffer *buffer;
        struct list_head buffers;

        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
         * modifying the batch contents after software parsing.
         */
        struct i915_gem_batch_pool batch_pool;

        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;

        unsigned irq_refcount;  /* protected by dev_priv->irq_lock */
        u32 irq_enable_mask;    /* bitmask to enable ring interrupt */
        struct drm_i915_gem_request *trace_irq_req;
        bool __must_check (*irq_get)(struct intel_engine_cs *ring);
        void (*irq_put)(struct intel_engine_cs *ring);

        int (*init_hw)(struct intel_engine_cs *ring);

        int (*init_context)(struct drm_i915_gem_request *req);

        void (*write_tail)(struct intel_engine_cs *ring,
                           u32 value);
        int __must_check (*flush)(struct drm_i915_gem_request *req,
                                  u32 invalidate_domains,
                                  u32 flush_domains);
        int (*add_request)(struct drm_i915_gem_request *req);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
        u32 (*get_seqno)(struct intel_engine_cs *ring);
        void (*set_seqno)(struct intel_engine_cs *ring,
                          u32 seqno);
        int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                   u64 offset, u32 length,
                                   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
        void (*cleanup)(struct intel_engine_cs *ring);

        /* GEN8 signal/wait table - never trust comments!
         *        signal to     signal to    signal to    signal to    signal to
         *          RCS            VCS          BCS         VECS          VCS2
         *      -------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  ie. transpose of g(x, y)
         *
         *        sync from     sync from    sync from    sync from     sync from
         *          RCS            VCS          BCS         VECS          VCS2
         *      -------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  ie. transpose of f(x, y)
         */
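        /*
         * Illustrative check of the generalization above (arithmetic only,
         * not driver code): with seqno_size == 8 and NUM_RINGS == 5,
         * f(1, 2) = (1 * 5 * 8) + (8 * 2) = 0x38 and
         * g(1, 2) = (2 * 5 * 8) + (8 * 1) = 0x58; in general
         * f(x, y) == g(y, x), i.e. the two tables are transposes.
         */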
        struct {
                u32 sync_seqno[I915_NUM_ENGINES-1];

                union {
                        struct {
                                /* our mbox written by others */
                                u32 wait[I915_NUM_ENGINES];
                                /* mboxes this ring signals to */
                                i915_reg_t signal[I915_NUM_ENGINES];
                        } mbox;
                        u64 signal_ggtt[I915_NUM_ENGINES];
                };

                /* AKA wait() */
                int (*sync_to)(struct drm_i915_gem_request *to_req,
                               struct intel_engine_cs *from,
                               u32 seqno);
                int (*signal)(struct drm_i915_gem_request *signaller_req,
                              /* num_dwords needed by caller */
                              unsigned int num_dwords);
        } semaphore;
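
        /*
         * Usage sketch (names here are illustrative, not a definitive
         * call site): to make the engine that owns @to_req wait until
         * engine @from has passed @seqno, the waiter's vfunc is invoked
         * roughly as
         *
         *      ret = waiter->semaphore.sync_to(to_req, from, seqno);
         *
         * which emits the appropriate semaphore poll into to_req's ring.
         */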

        /* Execlists */
        struct tasklet_struct irq_tasklet;
        spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
        unsigned int fw_domains;
        unsigned int next_context_status_buffer;
        unsigned int idle_lite_restore_wa;
        bool disable_lite_restore_wa;
        u32 ctx_desc_template;
        u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int (*emit_request)(struct drm_i915_gem_request *request);
        int (*emit_flush)(struct drm_i915_gem_request *request,
                          u32 invalidate_domains,
                          u32 flush_domains);
        int (*emit_bb_start)(struct drm_i915_gem_request *req,
                             u64 offset, unsigned dispatch_flags);

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_read_req
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * Seqno of the request most recently submitted to request_list.
         * Used exclusively by the hang checker to avoid grabbing the lock
         * while inspecting the request list.
         */
        u32 last_submitted_seqno;
        unsigned user_interrupts;

        bool gpu_caches_dirty;

        wait_queue_head_t irq_queue;

        struct intel_context *last_context;

        struct intel_ring_hangcheck hangcheck;

        struct {
                struct drm_i915_gem_object *obj;
                u32 gtt_offset;
                volatile u32 *cpu_page;
        } scratch;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this ring.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const struct drm_i915_reg_table *reg_tables;
        int reg_table_count;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Returns 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the ring's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-ring length
         * field encoding for the command (i.e. certain opcode ranges use
         * certain bits to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_engine_initialized(struct intel_engine_cs *engine)
{
        return engine->dev != NULL;
}

static inline unsigned
intel_engine_flag(struct intel_engine_cs *engine)
{
        return 1 << engine->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *engine,
                      struct intel_engine_cs *other)
{
        int idx;

        /*
         * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
         * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
         * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
         * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
         * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
         */

        idx = (other - engine) - 1;
        if (idx < 0)
                idx += I915_NUM_ENGINES;

        return idx;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
        mb();
        clflush(&engine->status_page.page_addr[reg]);
        mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
                        int reg, u32 value)
{
        engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
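
/*
 * Example (mirrors how the driver itself consumes the HWS page): the
 * engine's breadcrumb seqno is written to dword I915_GEM_HWS_INDEX, so
 * checking request completion reduces to
 *
 *      u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 *
 * optionally preceded by engine->irq_seqno_barrier(engine) on chipsets
 * that need the coherency kick described above.
 */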

struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_engine(struct intel_engine_cs *engine);
void intel_cleanup_engine(struct intel_engine_cs *engine);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *engine,
                                   u32 data)
{
        struct intel_ringbuffer *ringbuf = engine->buffer;
        iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
        ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
                                       i915_reg_t reg)
{
        intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
        struct intel_ringbuffer *ringbuf = engine->buffer;
        ringbuf->tail &= ringbuf->size - 1;
}
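
/*
 * Sketch of the canonical emission pattern (assuming a request @req
 * executing on @engine; any further error handling is elided):
 *
 *      ret = intel_ring_begin(req, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(engine, MI_NOOP);
 *      intel_ring_emit(engine, MI_NOOP);
 *      intel_ring_advance(engine);
 *
 * intel_ring_begin() guarantees the requested number of dwords is
 * available (waiting for space if necessary), each intel_ring_emit()
 * writes one dword at the tail, and intel_ring_advance() wraps the tail
 * back into the ring.
 */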

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
bool intel_engine_stopped(struct intel_engine_cs *engine);

int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *engine);
int intel_init_pipe_control(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *engine);

int init_workarounds_ring(struct intel_engine_cs *engine);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
        return ringbuf->tail;
}

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is ILK at 136 words. Reserving too much is better than reserving too little
 * as that allows for corner cases that might have been missed. So the figure
 * has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST 160

/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);

/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);

#endif /* _INTEL_RINGBUFFER_H_ */