#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#define I915_CMD_HASH_ORDER 9

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/**
	 * We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
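
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * hypothetical): the usable space in the ring is the gap between where the
 * GPU reads (head) and where the CPU writes (tail), less the
 * I915_RING_FREE_SPACE slack that keeps the two pointers from meeting on
 * one cacheline, per the BSpec quote above. The real driver additionally
 * masks the hardware head value before using it; something along these
 * lines:
 *
 *	static int example_ring_space(struct intel_ringbuffer *ringbuf)
 *	{
 *		int space = ringbuf->head -
 *			    (ringbuf->tail + I915_RING_FREE_SPACE);
 *
 *		if (space < 0)
 *			space += ringbuf->size;
 *		return space;
 *	}
 */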
struct intel_engine_cs {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	struct drm_device *dev;
	struct intel_ringbuffer *buffer;

	struct intel_hw_status_page status_page;

	unsigned	irq_refcount;	/* protected by dev_priv->irq_lock */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void		(*irq_put)(struct intel_engine_cs *ring);

	int		(*init)(struct intel_engine_cs *ring);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_engine_cs *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_engine_cs *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_engine_cs *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_engine_cs *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
					       u64 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_engine_cs *ring);

	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		struct {
			/* our mbox written by others */
			u32	wait[I915_NUM_RINGS];
			/* mboxes this ring signals to */
			u32	signal[I915_NUM_RINGS];
		} mbox;

		/* AKA wait() */
		int	(*sync_to)(struct intel_engine_cs *ring,
				   struct intel_engine_cs *to,
				   u32 seqno);
		int	(*signal)(struct intel_engine_cs *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have any not-yet-emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers,
	 * but only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length
	 * field encoding for the command (i.e. certain opcode ranges use
	 * certain bits to encode the command length in the header). A sketch
	 * of such a hook follows this struct.
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
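
/*
 * Illustrative sketch (not part of the original header; the function name
 * and exact encodings are assumptions): for MI commands, whose client field
 * (header bits 31:29) is 0, the DWord length typically lives in the low
 * bits of the command header, so a per-ring hook might look like:
 *
 *	static u32 example_get_cmd_length_mask(u32 cmd_header)
 *	{
 *		u32 client = cmd_header >> 29;
 *
 *		if (client == 0)
 *			return 0x3f;
 *		return 0;
 *	}
 *
 * When no cmd_hash entry exists, the parser would then take the command's
 * length as (cmd_header & mask) plus a small fixed bias.
 */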
static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	return ring->buffer && ring->buffer->obj;
}

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * Index into the mbox arrays for the other ring, counting
	 * cyclically from the ring after this one, e.g.:
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * and so on for the remaining rings.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);

/* Write one dword at the current tail and advance the CPU-side tail. */
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;

	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}

/*
 * intel_ring_advance() only wraps the CPU-side tail;
 * __intel_ring_advance() also submits the new tail to the hardware.
 */
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;

	ringbuf->tail &= ringbuf->size - 1;
}
void __intel_ring_advance(struct intel_engine_cs *ring);
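
/*
 * Illustrative usage sketch (not part of the original header): a caller
 * reserves space for a fixed number of dwords, writes exactly that many,
 * and then advances the tail. MI_NOOP stands in for a real command:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * Once the request's breadcrumb executes on the GPU, its seqno becomes
 * visible through intel_read_status_page(ring, I915_GEM_HWS_INDEX).
 */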
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
{
	return ring->buffer->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */