#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
	} id;
#define I915_NUM_RINGS 3
	uint32_t	mmio_base;
	void		*virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	uint32_t	head;
	uint32_t	tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/**
	 * We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;

	struct lock	irq_lock;
	uint32_t	irq_refcount;
	uint32_t	irq_mask;
	uint32_t	irq_seqno;		/* last seqno seen at irq time */
	uint32_t	trace_irq_seqno;
	uint32_t	waiting_seqno;
	uint32_t	sync_seqno[I915_NUM_RINGS-1];
	bool		(*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      uint32_t value);
	int		(*flush)(struct intel_ring_buffer *ring,
				 uint32_t invalidate_domains,
				 uint32_t flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
				       uint32_t *seqno);
	uint32_t	(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       uint32_t offset, uint32_t length);
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	u32		semaphore_register[3];	/* our mbox written by others */
	u32		signal_mbox[2];		/* mboxes this ring signals to */

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list; last_rendering_seqno can
	 * be used to differentiate between the two elements.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	uint32_t outstanding_lazy_request;

	drm_local_map_t map;

	void *private;
};
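
/*
 * Free space in the ring follows directly from the head and tail
 * offsets above.  A minimal sketch (the name intel_ring_space is
 * hypothetical, not part of the driver API; ring->head is assumed to
 * hold the already-masked head offset): the 8-byte slack stops the
 * tail from ever catching up to the head, which also keeps us clear
 * of the BSpec head/tail cacheline restriction quoted at the top of
 * this file.
 */
static inline int intel_ring_space(struct intel_ring_buffer *ring)
{
	int space = ring->head - (ring->tail + 8);

	if (space < 0)
		space += ring->size;	/* tail has wrapped past head */
	return space;
}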

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs  -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs
	 * bcs -> 0 = cs,  1 = vcs
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	cpu_ccfence();
	return ring->status_page.page_addr[reg];
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
	return intel_wait_ring_buffer(ring, ring->size - 8);
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

#define iowrite32(data, addr)	(*(volatile uint32_t *)(addr) = (data))

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
void intel_ring_advance(struct intel_ring_buffer *ring);
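
/*
 * Usage sketch for the emit path above (illustration only; the helper
 * name intel_ring_emit_noops is hypothetical and not part of the
 * driver).  A caller reserves space in dwords with intel_ring_begin(),
 * which may sleep until enough of the ring has been retired, writes
 * its dwords, then publishes the new tail with intel_ring_advance().
 * MI_NOOP encodes as 0; callers conventionally emit an even number of
 * dwords to keep the tail qword-aligned.
 */
static inline int intel_ring_emit_noops(struct intel_ring_buffer *ring,
					int ndwords)
{
	int ret, i;

	ret = intel_ring_begin(ring, ndwords);
	if (ret)
		return ret;

	for (i = 0; i < ndwords; i++)
		intel_ring_emit(ring, 0);	/* MI_NOOP */

	intel_ring_advance(ring);
	return 0;
}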

int __must_check intel_ring_idle(struct intel_ring_buffer *ring);

int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_request == 0);
	return ring->outstanding_lazy_request;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */
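
/*
 * Status page usage sketch (illustration only, kept as a comment so it
 * stays outside the include guard): the seqno breadcrumb written by
 * the ring's add_request hook lands at I915_GEM_HWS_INDEX, so the last
 * seqno the GPU has completed can be sampled without an MMIO read:
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */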