#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

enum intel_engine_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
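/*
 * Illustrative sketch only (not part of the driver interface): the iterator
 * above visits every (slice, subslice) pair whose bits are set in the two
 * masks, so a caller tallying the enabled units could look like the
 * hypothetical helper below. The name count_instdone_units() and its locals
 * are assumptions made purely for this example.
 *
 *	static unsigned int count_instdone_units(struct drm_i915_private *dev_priv)
 *	{
 *		unsigned int count = 0;
 *		int slice, subslice;
 *
 *		for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *			count++;
 *
 *		return count;
 *	}
 */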
struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_engine_hangcheck_action action;
	int deadlock;
	struct intel_instdone instdone;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2, /* Keep instances of the same type engine together. */
		VECS
	} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
	u64 fence_context;
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
		bool irq_posted;

		spinlock_t lock; /* protects the lists of requests */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned long timeout;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE	BIT(0)
#define I915_DISPATCH_PINNED	BIT(1)
#define I915_DISPATCH_RS	BIT(2)
	int (*emit_request)(struct drm_i915_gem_request *req);

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);
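	/* Worked instance (a sketch of the arithmetic only) of the semaphore
	 * layout documented in the table below: with I915_NUM_ENGINES == 5
	 * and gen8_semaphore_seqno_size == 8,
	 *
	 *	GEN8_SEMAPHORE_OFFSET(RCS, VCS) = (0 * 5 + 1) * 8 = 0x08
	 *	GEN8_SEMAPHORE_OFFSET(VCS, RCS) = (1 * 5 + 0) * 8 = 0x28
	 *
	 * i.e. RCS signals VCS at 0x08, and when RCS waits upon VCS it polls
	 * the slot that VCS signals into, 0x28 - matching the first rows of
	 * the "signal to" and "sync from" tables.
	 */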
	/* GEN8 signal/wait table - never trust comments!
	 *
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	  sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		u32 sync_seqno[I915_NUM_ENGINES-1];

		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32 wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		int (*signal)(struct drm_i915_gem_request *req);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
	} execlist_port[2];
	struct list_head execlist_queue;
	unsigned int fw_domains;
	bool disable_lite_restore_wa;
	bool preempt_wa;
	u32 ctx_desc_template;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;
	u32 last_pending_seqno;

	/* An RCU guarded pointer to the last request. No reference is
	 * held to the request, users must carefully acquire a reference to
	 * the request using i915_gem_active_get_rcu(), or hold the
	 * struct_mutex.
	 */
	struct i915_gem_active last_request;

	struct i915_gem_context *last_context;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline u32
intel_engine_sync_index(struct intel_engine_cs *engine,
			struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other->id - engine->id) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
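/*
 * Illustrative sketch only: since everything from dword 0x30 upwards is
 * driver defined, polling one of those slots reduces to the accessors above.
 * hws_scratch_equals() is a hypothetical name invented purely for this
 * example (I915_GEM_HWS_SCRATCH_INDEX is defined just below); the real
 * in-tree instance of the same pattern is intel_engine_get_seqno() further
 * down.
 *
 *	static inline bool
 *	hws_scratch_equals(struct intel_engine_cs *engine, u32 value)
 *	{
 *		return intel_read_status_page(engine,
 *					      I915_GEM_HWS_SCRATCH_INDEX) == value;
 *	}
 */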
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR		(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR	(I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_ring *ring)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
}

static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	return value & (ring->size - 1);
}

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

static inline int intel_engine_idle(struct intel_engine_cs *engine,
				    unsigned int flags)
{
	/* Wait upon the last request to be completed */
	return i915_gem_active_wait_unlocked(&engine->last_request,
					     flags, NULL, NULL);
}

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);
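/*
 * Illustrative sketch only of the intel_ring_begin() / intel_ring_emit() /
 * intel_ring_advance() contract described above: reserve n dwords, emit
 * exactly n of them, then "advance". emit_two_noops() is a hypothetical
 * helper invented for this example, and it assumes the request carries a
 * pointer to its ring (req->ring), as used elsewhere in the driver.
 *
 *	static int emit_two_noops(struct drm_i915_gem_request *req)
 *	{
 *		struct intel_ring *ring = req->ring;
 *		int ret;
 *
 *		ret = intel_ring_begin(req, 2);
 *		if (ret)
 *			return ret;
 *
 *		intel_ring_emit(ring, MI_NOOP);
 *		intel_ring_emit(ring, MI_NOOP);
 *		intel_ring_advance(ring);
 *
 *		return 0;
 *	}
 */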
/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);

static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
{
	return i915_gem_active_isset(&engine->last_request);
}

#endif /* _INTEL_RINGBUFFER_H_ */