xref: /dragonfly/sys/dev/drm/i915/intel_lrc.h (revision 7d89978d)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #ifndef _INTEL_LRC_H_
25 #define _INTEL_LRC_H_
26 
27 #include "intel_ringbuffer.h"
28 
29 #define GEN8_LR_CONTEXT_ALIGN 4096
30 
31 /* Execlists regs */
32 #define RING_ELSP(ring)				_MMIO((ring)->mmio_base + 0x230)
33 #define RING_EXECLIST_STATUS_LO(ring)		_MMIO((ring)->mmio_base + 0x234)
34 #define RING_EXECLIST_STATUS_HI(ring)		_MMIO((ring)->mmio_base + 0x234 + 4)
35 #define RING_CONTEXT_CONTROL(ring)		_MMIO((ring)->mmio_base + 0x244)
36 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
37 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
38 #define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
39 #define RING_CONTEXT_STATUS_BUF_BASE(ring)	_MMIO((ring)->mmio_base + 0x370)
40 #define RING_CONTEXT_STATUS_BUF_LO(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8)
41 #define RING_CONTEXT_STATUS_BUF_HI(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
42 #define RING_CONTEXT_STATUS_PTR(ring)		_MMIO((ring)->mmio_base + 0x3a0)
43 
44 /* The docs specify that the write pointer wraps around after 5h, "After status
45  * is written out to the last available status QW at offset 5h, this pointer
46  * wraps to 0."
47  *
48  * Therefore, one must infer that even though there are 3 bits available, 6 and
49  * 7 appear to be reserved.
50  */
51 #define GEN8_CSB_ENTRIES 6
52 #define GEN8_CSB_PTR_MASK 0x7
53 #define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
54 #define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
55 #define GEN8_CSB_WRITE_PTR(csb_status) \
56 	(((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
57 #define GEN8_CSB_READ_PTR(csb_status) \
58 	(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
59 
60 enum {
61 	INTEL_CONTEXT_SCHEDULE_IN = 0,
62 	INTEL_CONTEXT_SCHEDULE_OUT,
63 };
64 
65 /* Logical Rings */
66 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
67 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
68 void intel_logical_ring_stop(struct intel_engine_cs *engine);
69 void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
70 int intel_logical_rings_init(struct drm_device *dev);
71 
72 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
73 /**
74  * intel_logical_ring_advance() - advance the ringbuffer tail
75  * @ringbuf: Ringbuffer to advance.
76  *
77  * The tail is only updated in our logical ringbuffer struct.
78  */
79 static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
80 {
81 	ringbuf->tail &= ringbuf->size - 1;
82 }
83 /**
84  * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
85  * @ringbuf: Ringbuffer to write to.
86  * @data: DWORD to write.
87  */
88 static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
89 					   u32 data)
90 {
91 	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
92 	ringbuf->tail += 4;
93 }
94 static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
95 					       i915_reg_t reg)
96 {
97 	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
98 }
99 
100 /* Logical Ring Contexts */
101 
102 /* One extra page is added before LRC for GuC as shared data */
103 #define LRC_GUCSHR_PN	(0)
104 #define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)
105 #define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
106 
107 struct i915_gem_context;
108 
109 uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
110 void intel_lr_context_unpin(struct i915_gem_context *ctx,
111 			    struct intel_engine_cs *engine);
112 
113 struct drm_i915_private;
114 
115 void intel_lr_context_reset(struct drm_i915_private *dev_priv,
116 			    struct i915_gem_context *ctx);
117 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
118 				     struct intel_engine_cs *engine);
119 
120 /* Execlists */
121 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
122 				    int enable_execlists);
123 struct i915_execbuffer_params;
124 int intel_execlists_submission(struct i915_execbuffer_params *params,
125 			       struct drm_i915_gem_execbuffer2 *args,
126 			       struct list_head *vmas);
127 
128 void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
129 
130 #endif /* _INTEL_LRC_H_ */
131