#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
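 * (So, illustratively, assuming I915_NUM_ENGINES == 5 and the engine ids
 * from enum intel_engine_id below, each from/to pair gets its own 8-byte
 * slot: GEN8_SEMAPHORE_OFFSET(VECS, RCS) works out to (4*5 + 0) * 8 = 0xa0,
 * and GEN8_SEMAPHORE_OFFSET(RCS, VECS) to (0*5 + 4) * 8 = 0x20.)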
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	int space;
	int size;
	int effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
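 * (With the ids below, _VCS(0) expands to VCS and _VCS(1) to VCS2, which
 * is why instances of the same engine type must stay adjacent.)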
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id id;
	unsigned int exec_id;
	unsigned int hw_id;
	unsigned int guc_id;
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;

		bool irq_armed : 1;
		bool irq_enabled : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;
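	/* A minimal sketch of the waiter flow described above (illustrative
	 * and simplified; the canonical user is i915_wait_request()):
	 *
	 *	struct intel_wait wait;
	 *
	 *	intel_wait_init(&wait, rq);
	 *	intel_engine_add_wait(engine, &wait);
	 *	while (!i915_gem_request_completed(rq))
	 *		...sleep until the current bottom-half wakes us...;
	 *	intel_engine_remove_wait(engine, &wait);
	 */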
	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	int (*context_pin)(struct intel_engine_cs *engine,
			   struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req, u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request, int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
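	 * (Worked example for f() below, assuming NUM_RINGS == 5,
	 *  seqno_size == 8 and the ids this table uses (RCS=0, VCS=1,
	 *  BCS=2, VECS=3, VCS2=4): RCS signalling VCS lands at
	 *  0*5*8 + 8*1 = 0x08, matching the first row.)
	 *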
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	  sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32 wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
		GEM_DEBUG_DECL(u32 context_id);
	} execlist_port[2];
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;
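	/* An informal gloss (not from the original comments): execlist_port[0]
	 * holds the request currently executing on the engine, and
	 * execlist_port[1] the one queued up behind it in the ELSP; 'count'
	 * tracks how many times a request has been submitted to its port, so
	 * a resubmission can be recognised as a lite restore.
	 */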
	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we only do so when we are uncertain of the device state, we take
	 * a bit of extra paranoia to try and ensure that the HWS takes the
	 * value we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
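 * (Illustratively, assuming MI_STORE_DWORD_INDEX_SHIFT == 2 to turn a
 * dword index into a byte offset: dword 0x30 maps to byte offset
 * 0x30 << 2 == 0xc0, which is what I915_GEM_HWS_INDEX_ADDR below encodes.)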
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
void intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}

static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
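	 * (Informally, the reason this is tolerable: the tail is a
	 * naturally aligned u32, so a concurrent reader observes either
	 * the old or the new value in full, never a torn mix of the two.)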
554 */ 555 assert_ring_tail_valid(ring, tail); 556 ring->tail = tail; 557 return tail; 558 } 559 560 void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); 561 562 void intel_engine_setup_common(struct intel_engine_cs *engine); 563 int intel_engine_init_common(struct intel_engine_cs *engine); 564 int intel_engine_create_scratch(struct intel_engine_cs *engine, int size); 565 void intel_engine_cleanup_common(struct intel_engine_cs *engine); 566 567 int intel_init_render_ring_buffer(struct intel_engine_cs *engine); 568 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine); 569 int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine); 570 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); 571 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine); 572 573 u64 intel_engine_get_active_head(struct intel_engine_cs *engine); 574 u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine); 575 576 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) 577 { 578 return intel_read_status_page(engine, I915_GEM_HWS_INDEX); 579 } 580 581 static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine) 582 { 583 /* We are only peeking at the tail of the submit queue (and not the 584 * queue itself) in order to gain a hint as to the current active 585 * state of the engine. Callers are not expected to be taking 586 * engine->timeline->lock, nor are they expected to be concerned 587 * wtih serialising this hint with anything, so document it as 588 * a hint and nothing more. 589 */ 590 return READ_ONCE(engine->timeline->seqno); 591 } 592 593 int init_workarounds_ring(struct intel_engine_cs *engine); 594 int intel_ring_workarounds_emit(struct drm_i915_gem_request *req); 595 596 void intel_engine_get_instdone(struct intel_engine_cs *engine, 597 struct intel_instdone *instdone); 598 599 /* 600 * Arbitrary size for largest possible 'add request' sequence. The code paths 601 * are complex and variable. Empirical measurement shows that the worst case 602 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However, 603 * we need to allocate double the largest single packet within that emission 604 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW). 
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */