/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_

#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"

#include "i915_vma.h"

struct drm_i915_gem_request;

/*
 * This structure primarily describes the GEM object shared with the GuC.
 * The specs sometimes refer to this object as a "GuC context", but we use
 * the term "client" to avoid confusion with hardware contexts. This
 * GEM object is held for the entire lifetime of our interaction with
 * the GuC, being allocated before the GuC is loaded with its firmware.
 * Because there's no way to update the address used by the GuC after
 * initialisation, the shared object must stay pinned into the GGTT as
 * long as the GuC is in use. We also keep the first page (only) mapped
 * into kernel address space, as it includes shared data that must be
 * updated on every request submission.
 *
 * The single GEM object described here is actually made up of several
 * separate areas, as far as the GuC is concerned. The first page (kept
 * kmap'd) includes the "process descriptor" which holds sequence data for
 * the doorbell, and one cacheline which actually *is* the doorbell; a
 * write to this will "ring the doorbell" (i.e. send an interrupt to the
 * GuC). The subsequent pages of the client object constitute the work
 * queue (a circular array of work items), again described in the process
 * descriptor. Work queue pages are mapped momentarily as required.
 *
 * We also keep a few statistics on failures. Ideally, these should all
 * be zero!
 *   no_wq_space: times that the submission pre-check found no space was
 *                available in the work queue (note, the queue is shared,
 *                not per-engine). It is OK for this to be nonzero, but
 *                it should not be huge!
 *   b_fail: failed to ring the doorbell. This should never happen, unless
 *           somehow the hardware misbehaves, or maybe if the GuC firmware
 *           crashes? We probably need to reset the GPU to recover.
 *   retcode: errno from last guc_submit()
 */
struct i915_guc_client {
	struct i915_vma *vma;
	void *vaddr;
	struct i915_gem_context *owner;
	struct intel_guc *guc;

	uint32_t engines;		/* bitmap of (host) engine ids	*/
	uint32_t priority;
	u32 stage_id;
	uint32_t proc_desc_offset;

	u16 doorbell_id;
	unsigned long doorbell_offset;
	u32 doorbell_cookie;

	spinlock_t wq_lock;
	uint32_t wq_offset;
	uint32_t wq_size;
	uint32_t wq_tail;
	uint32_t wq_rsvd;
	uint32_t no_wq_space;
	uint32_t b_fail;
	int retcode;

	/* Per-engine counts of GuC submissions */
	uint64_t submissions[I915_NUM_ENGINES];
};
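
/*
 * Illustrative sketch (not the actual submission code): given the layout
 * described above, queuing a work item ends with updating the tail in the
 * process descriptor (in the first, kmap'd page) and then writing the
 * doorbell cacheline; any write to the doorbell interrupts the GuC.
 * Assuming the guc_process_desc layout from intel_guc_fwif.h and that the
 * caller holds wq_lock, this amounts to roughly:
 *
 *	struct guc_process_desc *desc = client->vaddr +
 *					client->proc_desc_offset;
 *	u32 *db = client->vaddr + client->doorbell_offset;
 *
 *	desc->tail = client->wq_tail;
 *	WRITE_ONCE(*db, client->doorbell_cookie);
 */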

enum intel_uc_fw_status {
	INTEL_UC_FIRMWARE_FAIL = -1,
	INTEL_UC_FIRMWARE_NONE = 0,
	INTEL_UC_FIRMWARE_PENDING,
	INTEL_UC_FIRMWARE_SUCCESS
};

/* User-friendly representation of an enum */
static inline
const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
{
	switch (status) {
	case INTEL_UC_FIRMWARE_FAIL:
		return "FAIL";
	case INTEL_UC_FIRMWARE_NONE:
		return "NONE";
	case INTEL_UC_FIRMWARE_PENDING:
		return "PENDING";
	case INTEL_UC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	}
	return "<invalid>";
}

enum intel_uc_fw_type {
	INTEL_UC_FW_TYPE_GUC,
	INTEL_UC_FW_TYPE_HUC
};

/* User-friendly representation of an enum */
static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
{
	switch (type) {
	case INTEL_UC_FW_TYPE_GUC:
		return "GuC";
	case INTEL_UC_FW_TYPE_HUC:
		return "HuC";
	}
	return "uC";
}

/*
 * This structure encapsulates all the data needed during the process
 * of fetching, caching, and loading the firmware image into the GuC.
 */
struct intel_uc_fw {
	const char *path;
	size_t size;
	struct drm_i915_gem_object *obj;
	enum intel_uc_fw_status fetch_status;
	enum intel_uc_fw_status load_status;

	uint16_t major_ver_wanted;
	uint16_t minor_ver_wanted;
	uint16_t major_ver_found;
	uint16_t minor_ver_found;

	enum intel_uc_fw_type type;
	uint32_t header_size;
	uint32_t header_offset;
	uint32_t rsa_size;
	uint32_t rsa_offset;
	uint32_t ucode_size;
	uint32_t ucode_offset;
};
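
/*
 * Example (sketch only, not the fetch path itself): after a fetch attempt,
 * the version found in the firmware header can be checked against what the
 * driver wanted before the blob is accepted, roughly:
 *
 *	if (fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS ||
 *	    fw->major_ver_found != fw->major_ver_wanted ||
 *	    fw->minor_ver_found < fw->minor_ver_wanted)
 *		reject the blob (and fall back to non-GuC submission);
 */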

struct intel_guc_log {
	uint32_t flags;
	struct i915_vma *vma;
	/* The runtime stuff gets created only when GuC logging gets enabled */
	struct {
		void *buf_addr;
		struct workqueue_struct *flush_wq;
		struct work_struct flush_work;
		struct rchan *relay_chan;
	} runtime;
	/* logging related stats */
	u32 capture_miss_count;
	u32 flush_interrupt_count;
	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 flush_count[GUC_MAX_LOG_BUFFER];
};

struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;

	/* intel_guc_recv interrupt related state */
	bool interrupts_enabled;

	struct i915_vma *ads_vma;
	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;
	struct ida stage_ids;

	struct i915_guc_client *execbuf_client;

	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
	uint32_t db_cacheline;		/* Cyclic counter mod pagesize	*/

	/* Action status & statistics */
	uint64_t action_count;		/* Total commands issued	*/
	uint32_t action_cmd;		/* Last command word		*/
	uint32_t action_status;		/* Last return status		*/
	uint32_t action_fail;		/* Total number of failures	*/
	int32_t action_err;		/* Last error code		*/

	uint64_t submissions[I915_NUM_ENGINES];
	uint32_t last_seqno[I915_NUM_ENGINES];

	/* To serialize the intel_guc_send actions */
	struct lock send_mutex;

	/* GuC's FW specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
};

struct intel_huc {
	/* Generic uC firmware management */
	struct intel_uc_fw fw;

	/* HuC-specific additions */
};

/* intel_uc.c */
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len);
}
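
/*
 * Usage sketch: callers build a small array of u32s whose first element is
 * an action opcode (enum intel_guc_action in intel_guc_fwif.h), followed by
 * that action's parameters, e.g. (action and parameters elided here):
 *
 *	u32 action[] = { INTEL_GUC_ACTION_..., param0, param1 };
 *
 *	err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 */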

/* intel_guc_loader.c */
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);

/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);

/* intel_guc_log.c */
int intel_guc_log_create(struct intel_guc *guc);
void intel_guc_log_destroy(struct intel_guc *guc);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);

static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);
	GEM_BUG_ON(offset < GUC_WOPCM_TOP);
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
	return offset;
}
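
/*
 * Example (illustrative): the asserts above encode the requirement that a
 * GuC-visible address lies above the WOPCM region and below GUC_GGTT_TOP.
 * GGTT addresses handed to the firmware go through this helper, e.g.:
 *
 *	u32 offset = guc_ggtt_offset(guc->log.vma);
 */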

/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
int intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);

#endif