/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_

/* Execlists regs */
#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
#define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)

/* Logical Rings */
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);

int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
/**
 * intel_logical_ring_advance() - advance the ringbuffer tail
 * @ringbuf: Ringbuffer to advance.
 *
 * The tail is only updated in our logical ringbuffer struct.
 */
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
	ringbuf->tail &= ringbuf->size - 1;
}
/**
 * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
 * @ringbuf: Ringbuffer to write to.
 * @data: DWORD to write.
 */
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
					   u32 data)
{
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
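
/*
 * Typical usage of the emission helpers above (an illustrative sketch, not
 * taken from any specific caller; the 2-dword reservation and the MI_NOOP
 * payloads are placeholders):
 *
 *	ret = intel_logical_ring_begin(ringbuf, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_advance(ringbuf);
 *
 * intel_logical_ring_begin() reserves room for num_dwords DWORDs, each
 * intel_logical_ring_emit() writes one DWORD and moves the tail forward by
 * four bytes, and intel_logical_ring_advance() wraps the tail back into the
 * ringbuffer. Callers that want the commands executed end with
 * intel_logical_ring_advance_and_submit() instead, which also submits the
 * context to the ELSP.
 */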

/* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
				       struct intel_context *ctx);
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring);

/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);

/**
 * struct intel_ctx_submit_request - queued context submission request
 * @ctx: Context to submit to the ELSP.
 * @ring: Engine to submit it to.
 * @tail: how far into the context's ringbuffer this request goes.
 * @execlist_link: link in the submission queue.
 * @work: workqueue for processing this request in a bottom half.
 * @elsp_submitted: number of times this request has been sent to the ELSP.
 *
 * The ELSP only accepts two elements at a time, so we queue context/tail
 * pairs on a given queue (ring->execlist_queue) until the hardware is
 * available. The queue serves a double purpose: we also use it to keep track
 * of the up to two contexts currently in the hardware (usually one in
 * execution and the other queued up by the GPU): we only remove elements
 * from the head of the queue when the hardware informs us that an element
 * has been completed.
 *
 * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
 */
struct intel_ctx_submit_request {
	struct intel_context *ctx;
	struct intel_engine_cs *ring;
	u32 tail;

	struct list_head execlist_link;
	struct work_struct work;

	int elsp_submitted;
};

void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);

#endif /* _INTEL_LRC_H_ */