#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	u32 *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
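/*
 * A minimal illustrative sketch of how the head/tail restriction above is
 * honoured in practice: free-space accounting keeps I915_RING_FREE_SPACE
 * bytes in reserve, broadly mirroring the __intel_ring_space() helper
 * declared later in this header (this is not the driver's implementation,
 * which lives in intel_ringbuffer.c; shown only to illustrate the arithmetic):
 *
 *	static int example_ring_space(int head, int tail, int size)
 *	{
 *		int space = head - tail;	// free bytes between tail (CPU) and head (GPU)
 *		if (space <= 0)			// tail has wrapped past head
 *			space += size;
 *		return space - I915_RING_FREE_SPACE;	// keep the reserve
 *	}
 */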
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	unsigned long user_interrupts;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
	u32 instdone[I915_NUM_INSTDONE_REG];
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;
	struct i915_vma *vma;

	struct intel_engine_cs *engine;
	struct list_head link;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * we use a single page to load ctx workarounds so all of these
 * values are referred to in terms of dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct drm_i915_gem_object *obj;
};

struct drm_i915_gem_request;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	unsigned int hw_id;
	unsigned int guc_id; /* XXX same as hw_id? */
	u32 mmio_base;
	struct intel_ringbuffer *buffer;
	struct list_head buffers;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct *irq_seqno_bh; /* bh for user interrupts */
		unsigned long irq_wakeups;
		bool irq_posted;

		struct lock lock; /* protects the lists of requests */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *ring);
	void		(*irq_disable)(struct intel_engine_cs *ring);

	int		(*init_hw)(struct intel_engine_cs *ring);

	int		(*init_context)(struct drm_i915_gem_request *req);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct drm_i915_gem_request *req);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
					       u64 offset, u32 length,
					       unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
	void		(*cleanup)(struct intel_engine_cs *ring);
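	/*
	 * For reference, a hedged sketch of how the flush() hook above is
	 * typically driven; the exact domain arguments are an assumption here
	 * (see intel_ring_flush_all_caches()/intel_ring_invalidate_all_caches(),
	 * declared later in this header, and intel_ringbuffer.c for the
	 * authoritative callers):
	 *
	 *	// write back dirty GPU caches before handing results to others
	 *	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
	 *	// or invalidate caches before sampling freshly written data
	 *	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	 */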
	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	  sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		u32	sync_seqno[I915_NUM_ENGINES-1];

		union {
			struct {
				/* our mbox written by others */
				u32		wait[I915_NUM_ENGINES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[I915_NUM_ENGINES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *to_req,
				   struct intel_engine_cs *from,
				   u32 seqno);
		int	(*signal)(struct drm_i915_gem_request *signaller_req,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct lock execlist_lock; /* used inside tasklet, use spin_lock_bh */
	struct list_head execlist_queue;
	unsigned int fw_domains;
	unsigned int next_context_status_buffer;
	unsigned int idle_lite_restore_wa;
	bool disable_lite_restore_wa;
	u32 ctx_desc_template;
	int		(*emit_request)(struct drm_i915_gem_request *request);
	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 invalidate_domains,
				      u32 flush_domains);
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, unsigned dispatch_flags);
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;

	bool gpu_caches_dirty;

	struct i915_gem_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Returns 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length
	 * field encoding for the command (i.e. certain opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_engine_initialized(const struct intel_engine_cs *engine)
{
	return engine->i915 != NULL;
}

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *engine,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	linux_clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}
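/*
 * Illustrative only: the per-engine breadcrumb seqno lives in the hardware
 * status page and is read through the accessor above; intel_engine_get_seqno()
 * further down in this header boils down to
 * intel_read_status_page(engine, I915_GEM_HWS_INDEX). A hypothetical helper
 * (not part of the driver) showing the usual wrap-safe comparison idiom:
 *
 *	static bool example_seqno_passed(struct intel_engine_cs *engine, u32 seqno)
 *	{
 *		// signed difference tolerates seqno wraparound
 *		return (s32)(intel_read_status_page(engine, I915_GEM_HWS_INDEX) - seqno) >= 0;
 *	}
 *
 * CPU-side writes via intel_write_status_page() can be paired with
 * intel_flush_status_page() when the update must be pushed out immediately.
 */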
/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
				     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_engine(struct intel_engine_cs *engine);
void intel_cleanup_engine(struct intel_engine_cs *engine);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *engine,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
				       i915_reg_t reg)
{
	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);

int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
void intel_fini_pipe_control(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}
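/*
 * A minimal usage sketch of the emission helpers above (illustration only;
 * MI_NOOP and req->engine come from the wider driver, not this header):
 * reserve space for a fixed number of dwords, emit exactly that many, then
 * advance the tail.
 *
 *	struct intel_engine_cs *engine = req->engine;
 *	int ret;
 *
 *	ret = intel_ring_begin(req, 2);		// reserve 2 dwords
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(engine, MI_NOOP);	// dword 0
 *	intel_ring_emit(engine, MI_NOOP);	// dword 1
 *	intel_ring_advance(engine);		// mask tail back into the ring size
 */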
/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW, i.e.
 * 84 dwords * 4 bytes = 336 bytes).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
{
	bool wakeup = false;
	struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
	/* Note that for this not to dangerously chase a dangling pointer,
	 * the caller is responsible for ensuring that the task remains valid
	 * for wake_up_process() i.e. that the RCU grace period cannot expire.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (tsk)
		wakeup = wake_up_process(tsk);
	return wakeup;
}

void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */