#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
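/*
 * Illustrative sketch, not part of the original header: the free-space
 * computation behind __intel_ring_space() (declared near the end of this
 * file) applies the reservation above, keeping at least
 * I915_RING_FREE_SPACE bytes between tail and head:
 *
 *      space = head - tail;
 *      if (space <= 0)
 *              space += size;
 *      return space - I915_RING_FREE_SPACE;
 */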
struct intel_hw_status_page {
        u32 *page_addr;
        unsigned int gfx_addr;
        struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (__ring)->id))

#define GEN8_RING_SEMAPHORE_INIT do { \
        if (!dev_priv->semaphore_obj) { \
                break; \
        } \
        ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
        ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
        ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
        ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
        ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
        ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
        } while (0)
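/*
 * Worked example of the offset math above (engine ids: RCS=0, VCS=1, BCS=2,
 * VECS=3, VCS2=4; each seqno slot is 8 bytes; vcs/bcs here stand for the
 * VCS/BCS engine pointers):
 *
 *      GEN8_SIGNAL_OFFSET(vcs, BCS) = base + (1 * 5 * 8) + (8 * 2) = base + 0x38
 *      GEN8_WAIT_OFFSET(bcs, VCS)   = base + (1 * 5 * 8) + (8 * 2) = base + 0x38
 *
 * Signaller and waiter thus resolve to the same GGTT slot; the full layout
 * is tabulated in the signal/wait tables inside struct intel_engine_cs below.
 */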
enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
        HANGCHECK_ACTIVE_LOOP,
        HANGCHECK_KICK,
        HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
        u64 acthd;
        u64 max_acthd;
        u32 seqno;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
};

struct intel_ringbuffer {
        struct drm_i915_gem_object *obj;
        char __iomem *virtual_start;

        struct intel_engine_cs *ring;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};

struct intel_context;
struct drm_i915_reg_descriptor;

struct intel_engine_cs {
        const char *name;
        enum intel_ring_id {
                RCS = 0x0,
                VCS,
                BCS,
                VECS,
                VCS2
        } id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
        u32 mmio_base;
        struct drm_device *dev;
        struct intel_ringbuffer *buffer;

        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
         * modifying the batch contents after software parsing.
         */
        struct i915_gem_batch_pool batch_pool;

        struct intel_hw_status_page status_page;

        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32 irq_enable_mask;   /* bitmask to enable ring interrupt */
        struct drm_i915_gem_request *trace_irq_req;
        bool __must_check (*irq_get)(struct intel_engine_cs *ring);
        void (*irq_put)(struct intel_engine_cs *ring);

        int (*init_hw)(struct intel_engine_cs *ring);

        int (*init_context)(struct intel_engine_cs *ring,
                            struct intel_context *ctx);

        void (*write_tail)(struct intel_engine_cs *ring,
                           u32 value);
        int __must_check (*flush)(struct intel_engine_cs *ring,
                                  u32 invalidate_domains,
                                  u32 flush_domains);
        int (*add_request)(struct intel_engine_cs *ring);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        u32 (*get_seqno)(struct intel_engine_cs *ring,
                         bool lazy_coherency);
        void (*set_seqno)(struct intel_engine_cs *ring,
                          u32 seqno);
        int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
                                   u64 offset, u32 length,
                                   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
        void (*cleanup)(struct intel_engine_cs *ring);

        /* GEN8 signal/wait table - never trust comments!
         *        signal to     signal to    signal to    signal to    signal to
         *          RCS            VCS          BCS          VECS         VCS2
         *      -------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  i.e. transpose of g(x, y)
         *
         *        sync from     sync from    sync from    sync from    sync from
         *          RCS            VCS          BCS          VECS         VCS2
         *      -------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  i.e. transpose of f(x, y)
         */
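        /*
         * Note on the union below: on gen6/7 the mbox.wait[] and
         * mbox.signal[] slots hold MMIO semaphore mailbox register offsets,
         * while on gen8+ signal_ggtt[] holds the GGTT addresses filled in by
         * GEN8_RING_SEMAPHORE_INIT according to the tables above.
         */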
        struct {
                u32 sync_seqno[I915_NUM_RINGS-1];

                union {
                        struct {
                                /* our mbox written by others */
                                u32 wait[I915_NUM_RINGS];
                                /* mboxes this ring signals to */
                                u32 signal[I915_NUM_RINGS];
                        } mbox;
                        u64 signal_ggtt[I915_NUM_RINGS];
                };

                /* AKA wait() */
                int (*sync_to)(struct intel_engine_cs *ring,
                               struct intel_engine_cs *to,
                               u32 seqno);
                int (*signal)(struct intel_engine_cs *signaller,
                              /* num_dwords needed by caller */
                              unsigned int num_dwords);
        } semaphore;

        /* Execlists */
        struct lock execlist_lock;
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int (*emit_request)(struct intel_ringbuffer *ringbuf,
                            struct drm_i915_gem_request *request);
        int (*emit_flush)(struct intel_ringbuffer *ringbuf,
                          struct intel_context *ctx,
                          u32 invalidate_domains,
                          u32 flush_domains);
        int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
                             struct intel_context *ctx,
                             u64 offset, unsigned dispatch_flags);

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_read_req
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * Do we have some not yet emitted requests outstanding?
         */
        struct drm_i915_gem_request *outstanding_lazy_request;
        /**
         * Seqno of request most recently submitted to request_list.
         * Used exclusively by hang checker to avoid grabbing lock while
         * inspecting request list.
         */
        u32 last_submitted_seqno;

        bool gpu_caches_dirty;

        wait_queue_head_t irq_queue;

        struct intel_context *default_context;
        struct intel_context *last_context;

        struct intel_ring_hangcheck hangcheck;

        struct {
                struct drm_i915_gem_object *obj;
                u32 gtt_offset;
                volatile u32 *cpu_page;
        } scratch;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this ring.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const struct drm_i915_reg_descriptor *reg_table;
        int reg_count;

        /*
         * Table of registers allowed in commands that read/write registers, but
         * only from the DRM master.
         */
        const struct drm_i915_reg_descriptor *master_reg_table;
        int master_reg_count;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Return 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the ring's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-ring length field
         * encoding for the command (i.e. certain opcode ranges use certain bits
         * to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
};

bool intel_ring_initialized(struct intel_engine_cs *ring);

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
        return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
                      struct intel_engine_cs *other)
{
        int idx;

        /*
         * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
         * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
         * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
         * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
         * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
         */

        idx = (other - ring) - 1;
        if (idx < 0)
                idx += I915_NUM_RINGS;

        return idx;
}
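/*
 * Example, per the mapping in the comment above: the engines are laid out
 * consecutively, so the pointer difference equals the id difference. For
 * ring = vcs (id 1) and other = rcs (id 0), idx = (0 - 1) - 1 = -2, and
 * adding I915_NUM_RINGS yields 3, matching "vcs -> 3 = rcs". The result is
 * used to index semaphore.sync_seqno[] for the last seqno seen from 'other'.
 */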
static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
                       int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        barrier();
        return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
                        int reg, u32 value)
{
        ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
                               struct intel_ringbuffer *ringbuf);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
                                   u32 data)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;

        iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
        ringbuf->tail += 4;
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;

        ringbuf->tail &= ringbuf->size - 1;
}
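/*
 * Typical emission sequence using the helpers above (illustrative sketch;
 * real callers pick real commands and propagate errors):
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 *
 * intel_ring_begin() must reserve at least as many dwords as will be
 * emitted, since intel_ring_emit() itself does no bounds checking.
 */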
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
        return ringbuf->tail;
}

static inline struct drm_i915_gem_request *
intel_ring_get_request(struct intel_engine_cs *ring)
{
        BUG_ON(ring->outstanding_lazy_request == NULL);
        return ring->outstanding_lazy_request;
}

#endif /* _INTEL_RINGBUFFER_H_ */