xref: /linux/drivers/gpu/drm/xe/xe_guc.c (revision 021bc4b9)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc.h"

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "generated/xe_wa_oob.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_guc_ads.h"
#include "xe_guc_ct.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP    0xFEE00000
static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
			    struct xe_bo *bo)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 addr = xe_bo_ggtt_addr(bo);

	xe_assert(xe, addr >= xe_wopcm_size(xe));
	xe_assert(xe, addr < GUC_GGTT_TOP);
	xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);

	return addr;
}

static u32 guc_ctl_debug_flags(struct xe_guc *guc)
{
	u32 level = xe_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
	u32 flags = 0;

	if (!guc_to_xe(guc)->info.skip_guc_pc)
		flags |= GUC_CTL_ENABLE_SLPC;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
{
	u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define LOG_UNIT SZ_1M
	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
	#else
	#define LOG_UNIT SZ_4K
	#define LOG_FLAG 0
	#endif

	#if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
	#define CAPTURE_UNIT SZ_1M
	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
	#else
	#define CAPTURE_UNIT SZ_4K
	#define CAPTURE_FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
			(GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
	BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
			(GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		CAPTURE_FLAG |
		LOG_FLAG |
		((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
		 GUC_LOG_CAPTURE_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef LOG_UNIT
	#undef LOG_FLAG
	#undef CAPTURE_UNIT
	#undef CAPTURE_FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
	u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	if (XE_WA(gt, 22012773006))
		flags |= GUC_WA_POLLCS;

	if (XE_WA(gt, 16011759253))
		flags |= GUC_WA_GAM_CREDITS;

	if (XE_WA(gt, 14014475959))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	if (XE_WA(gt, 22011391025) || XE_WA(gt, 14012197797))
		flags |= GUC_WA_DUAL_QUEUE;

	/*
	 * Wa_22011802037: FIXME - there's more to be done than simply setting
	 * this flag: make sure each CS is stopped when preparing for GT reset
	 * and wait for pending MI_FW.
	 */
	if (GRAPHICS_VERx100(xe) < 1270)
		flags |= GUC_WA_PRE_PARSER;

	if (XE_WA(gt, 16011777198))
		flags |= GUC_WA_RCS_RESET_BEFORE_RC6;

	if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	if ((XE_WA(gt, 16015675438) || XE_WA(gt, 18020744125)) &&
	    !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;

	if (XE_WA(gt, 1509372804))
		flags |= GUC_WA_RENDER_RST_RC6_EXIT;

	return flags;
}

static u32 guc_ctl_devid(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);

	return (((u32)xe->info.devid) << 16) | xe->info.revid;
}

static void guc_init_params(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = 0;
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = 0;
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

static void guc_init_params_post_hwconfig(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

/*
 * Initialize the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_write_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int i;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
}

static void guc_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc *guc = arg;

	xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
	xe_guc_pc_fini(&guc->pc);
	xe_uc_fini_hw(&guc_to_gt(guc)->uc);
	xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
}

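/**
 * xe_guc_init - initialize the GuC and its sub-components
 * @guc: The GuC object
 *
 * Initialize the firmware, log, ADS, CT and PC sub-components, register the
 * guc_fini() cleanup action and pick the notify register for the GT type.
 * On success, the firmware status is moved to LOADABLE.
 *
 * Return: 0 on success, negative error code on error.
 */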
int xe_guc_init(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	guc->fw.type = XE_UC_FW_TYPE_GUC;
	ret = xe_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	if (!xe_uc_fw_is_enabled(&guc->fw))
		return 0;

	ret = xe_guc_log_init(&guc->log);
	if (ret)
		goto out;

	ret = xe_guc_ads_init(&guc->ads);
	if (ret)
		goto out;

	ret = xe_guc_ct_init(&guc->ct);
	if (ret)
		goto out;

	ret = xe_guc_pc_init(&guc->pc);
	if (ret)
		goto out;

	ret = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, guc_fini, guc);
	if (ret)
		goto out;

	guc_init_params(guc);

	if (xe_gt_is_media_type(gt))
		guc->notify_reg = MED_GUC_HOST_INTERRUPT;
	else
		guc->notify_reg = GUC_HOST_INTERRUPT;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	drm_err(&xe->drm, "GuC init failed with %d\n", ret);
	return ret;
}

/**
 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
	guc_init_params_post_hwconfig(guc);

	return xe_guc_ads_init_post_hwconfig(&guc->ads);
}

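/**
 * xe_guc_post_load_init - finish GuC setup after the firmware is running
 * @guc: The GuC object
 *
 * Populate the remaining ADS entries and mark submission as enabled.
 *
 * Return: 0 on success, negative error code on error.
 */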
int xe_guc_post_load_init(struct xe_guc *guc)
{
	xe_guc_ads_populate_post_load(&guc->ads);
	guc->submission_state.enabled = true;

	return 0;
}

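/**
 * xe_guc_reset - perform a GuC-only (GRDOM_GUC) reset
 * @guc: The GuC object
 *
 * Trigger the reset via GDRST, wait for the hardware to complete it and
 * check that the GuC MIA core ends up in reset. The caller must hold the
 * GT forcewake domain.
 *
 * Return: 0 on success, negative error code on error.
 */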
int xe_guc_reset(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 guc_status, gdrst;
	int ret;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(gt, GDRST, GRDOM_GUC);

	ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
	if (ret) {
		drm_err(&xe->drm, "GuC reset timed out, GDRST=0x%08x\n",
			gdrst);
		goto err_out;
	}

	guc_status = xe_mmio_read32(gt, GUC_STATUS);
	if (!(guc_status & GS_MIA_IN_RESET)) {
		drm_err(&xe->drm,
			"GuC status: 0x%x, MIA core expected to be in reset\n",
			guc_status);
		ret = -EIO;
		goto err_out;
	}

	return 0;

err_out:
	return ret;
}

static void guc_prepare_xfer(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = guc_to_xe(guc);
	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
		GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
		GUC_ENABLE_MIA_CLOCK_GATING;

	if (GRAPHICS_VERx100(xe) < 1250)
		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
				GUC_ENABLE_MIA_CACHING;

	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);

	/* Must program this register before loading the ucode with DMA */
	xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);

	xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
}

/*
 * Support both in-register (MMIO) and in-memory transfer of the RSA key.
 */
static int guc_xfer_rsa(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	size_t copied;
	int i;

	if (guc->fw.rsa_size > 256) {
		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
				    xe_uc_fw_rsa_offset(&guc->fw);
		xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
		return 0;
	}

	copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
	if (copied < sizeof(rsa))
		return -ENOMEM;

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}

static int guc_wait_ucode(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 status;
	int ret;

	/*
	 * Wait for the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms
	 * (assuming the GT clock is at maximum frequency). So, a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver may decide to reset the GuC and
	 * attempt the ucode load again if this happens.)
	 *
	 * FIXME: There is a known (but exceedingly unlikely) race condition
	 * where the asynchronous frequency management code could reduce
	 * the GT clock while a GuC reload is in progress (during a full
	 * GT reset). A fix is in progress but there are complex locking
	 * issues to be resolved. In the meantime bump the timeout to
	 * 200ms. Even at slowest clock, this should be sufficient. And
	 * in the working case, a larger timeout makes no difference.
	 */
	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
			     FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
			     200000, &status, false);

	if (ret) {
		struct drm_device *drm = &xe->drm;
		struct drm_printer p = drm_info_printer(drm->dev);

		drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
		drm_info(drm, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
			 REG_FIELD_GET(GS_MIA_IN_RESET, status),
			 REG_FIELD_GET(GS_BOOTROM_MASK, status),
			 REG_FIELD_GET(GS_UKERNEL_MASK, status),
			 REG_FIELD_GET(GS_MIA_MASK, status),
			 REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));

		if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
			drm_info(drm, "GuC firmware signature verification failed\n");
			ret = -ENOEXEC;
		}

		if (REG_FIELD_GET(GS_UKERNEL_MASK, status) ==
		    XE_GUC_LOAD_STATUS_EXCEPTION) {
			drm_info(drm, "GuC firmware exception. EIP: %#x\n",
				 xe_mmio_read32(guc_to_gt(guc),
						SOFT_SCRATCH(13)));
			ret = -ENXIO;
		}

		xe_guc_log_print(&guc->log, &p);
	} else {
		drm_dbg(&xe->drm, "GuC successfully loaded\n");
	}

	return ret;
}

static int __xe_guc_upload(struct xe_guc *guc)
{
	int ret;

	guc_write_params(guc);
	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded separately, either by copying it to the UOS_RSA_SCRATCH
	 * register (if key size <= 256) or through a ggtt-pinned vma (if key
	 * size > 256). The RSA size and therefore the way we provide it to the
	 * HW is fixed for each platform and hard-coded in the bootrom.
	 */
	ret = guc_xfer_rsa(guc);
	if (ret)
		goto out;
	/*
	 * Current uCode expects the code to be loaded at 8k; locations below
	 * this are used for the stack.
	 */
	ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
	if (ret)
		goto out;

	/* Wait for authentication */
	ret = guc_wait_ucode(guc);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
	return 0;

out:
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
	/* FIXME: should be "return ret", but we don't want to stop the load currently */
	return 0;
}

/**
 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
 * @guc: The GuC object
 *
 * This function uploads a minimal GuC that does not support submissions but
 * is in a state where the hwconfig table can be read. Next, it reads and
 * parses the hwconfig table so it can be used for subsequent steps in the
 * driver load. Lastly, it enables CT communication (XXX: this is needed for
 * PFs/VFs only).
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	int ret;

	xe_guc_ads_populate_minimal(&guc->ads);

	ret = __xe_guc_upload(guc);
	if (ret)
		return ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	return 0;
}

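/**
 * xe_guc_upload - populate the full ADS and upload the GuC firmware
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error (note that load
 * failures are currently masked, see the FIXME in __xe_guc_upload()).
 */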
int xe_guc_upload(struct xe_guc *guc)
{
	xe_guc_ads_populate(&guc->ads);

	return __xe_guc_upload(guc);
}

static void guc_handle_mmio_msg(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 msg;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
	msg &= XE_GUC_RECV_MSG_EXCEPTION |
		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
	xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);

	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		drm_err(&guc_to_xe(guc)->drm,
			"Received early GuC crash dump notification!\n");

	if (msg & XE_GUC_RECV_MSG_EXCEPTION)
		drm_err(&guc_to_xe(guc)->drm,
			"Received early GuC exception notification!\n");
}

static void guc_enable_irq(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 events = xe_gt_is_media_type(gt) ?
		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	/* Primary GuC and media GuC share a single enable bit */
	xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));

	/*
	 * There are separate mask bits for primary and media GuCs, so use
	 * a RMW operation to avoid clobbering the other GuC's setting.
	 */
	xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
}

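/**
 * xe_guc_enable_communication - enable host/GuC communication
 * @guc: The GuC object
 *
 * Enable the GuC-to-host interrupt, unmask the ARAT expired interrupt in
 * PMINTRMSK, bring up the CT channel and consume any early MMIO message
 * left in SOFT_SCRATCH(15).
 *
 * Return: 0 on success, negative error code on error.
 */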
int xe_guc_enable_communication(struct xe_guc *guc)
{
	int err;

	guc_enable_irq(guc);

	xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
		      ARAT_EXPIRED_INTRMSK, 0);

	err = xe_guc_ct_enable(&guc->ct);
	if (err)
		return err;

	guc_handle_mmio_msg(guc);

	return 0;
}

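/**
 * xe_guc_suspend - prepare the GuC for suspend
 * @guc: The GuC object
 *
 * Ask the firmware to perform a client soft reset via an MMIO action, then
 * sanitize the software state so the firmware is reloaded on resume.
 *
 * Return: 0 on success, negative error code on error.
 */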
int xe_guc_suspend(struct xe_guc *guc)
{
	int ret;
	u32 action[] = {
		XE_GUC_ACTION_CLIENT_SOFT_RESET,
	};

	ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
	if (ret) {
		drm_err(&guc_to_xe(guc)->drm,
			"GuC suspend: CLIENT_SOFT_RESET failed: %d!\n", ret);
		return ret;
	}

	xe_guc_sanitize(guc);
	return 0;
}

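/**
 * xe_guc_notify - ring the GuC doorbell
 * @guc: The GuC object
 *
 * Write the host interrupt register to let the GuC know that new data is
 * available for it to process.
 */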
void xe_guc_notify(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	const u32 default_notify_data = 0;

	/*
	 * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
	 * additional payload data to the GuC, but this capability is not
	 * used by the firmware yet. Use the default value in the meantime.
	 */
	xe_mmio_write32(gt, guc->notify_reg, default_notify_data);
}

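/**
 * xe_guc_auth_huc - ask the GuC to authenticate the HuC firmware
 * @guc: The GuC object
 * @rsa_addr: GGTT address of the HuC RSA signature
 *
 * Return: 0 on success, negative error code on error.
 */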
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
{
	u32 action[] = {
		XE_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_addr
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

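/**
 * xe_guc_mmio_send_recv - send an HXG request to the GuC over MMIO
 * @guc: The GuC object
 * @request: the HXG request to send, @len dwords long
 * @len: length of @request in dwords
 * @response_buf: optional buffer of VF_SW_FLAG_COUNT dwords for the full reply
 *
 * Only valid while CT communication is disabled. The request is written to
 * the VF_SW_FLAG (or MED_VF_SW_FLAG) scratch registers and the GuC is then
 * notified; BUSY and RETRY replies are handled internally.
 *
 * Return: DATA0 from a SUCCESS reply, or a negative error code on failure.
 */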
int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
			  u32 len, u32 *response_buf)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 header, reply;
	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
	const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
	int ret;
	int i;

	BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);

	xe_assert(xe, !guc->ct.enabled);
	xe_assert(xe, len);
	xe_assert(xe, len <= VF_SW_FLAG_COUNT);
	xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
		  GUC_HXG_ORIGIN_HOST);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
		  GUC_HXG_TYPE_REQUEST);

retry:
	/* Not in a critical data path, so just use if/else for the GT type */
	if (xe_gt_is_media_type(gt)) {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
	} else {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(gt, VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
	}

	xe_guc_notify(guc);

	ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
			     50000, &reply, false);
	if (ret) {
timeout:
		drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
			request[0], reply);
		return ret;
	}

	header = xe_mmio_read32(gt, reply_reg);
	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
		/*
		 * Once we get a BUSY reply we must wait again for the final
		 * response, but this time we can't use the ORIGIN mask
		 * anymore. To spot the right change in the reply, we take
		 * advantage of the fact that the SUCCESS and FAILURE response
		 * types differ only by a single bit, while all their other
		 * bits are set; those common bits can serve as the new mask.
		 */
		u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
		u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);

		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);

		ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
				     1000000, &header, false);

		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
			     GUC_HXG_ORIGIN_GUC))
			goto proto;
		if (unlikely(ret))
			goto timeout;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		drm_dbg(&xe->drm, "mmio request %#x: retrying, reason %#x\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		drm_err(&xe->drm, "mmio request %#x: failure %#x/%#x\n",
			request[0], error, hint);
		return -ENXIO;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
	    GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		drm_err(&xe->drm, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		return -EPROTO;
	}

	/* Just copy the entire possible message response */
	if (response_buf) {
		response_buf[0] = header;

		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
			reply_reg.addr += sizeof(u32);
			response_buf[i] = xe_mmio_read32(gt, reply_reg);
		}
	}

	/* Use data from the GuC response as our return value */
	return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}

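/**
 * xe_guc_mmio_send - send an HXG request to the GuC over MMIO, no reply buffer
 * @guc: The GuC object
 * @request: the HXG request to send, @len dwords long
 * @len: length of @request in dwords
 *
 * Return: DATA0 from a SUCCESS reply, or a negative error code on failure.
 */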
int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
	return xe_guc_mmio_send_recv(guc, request, len, NULL);
}

static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
			   lower_32_bits(val)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
			   upper_32_bits(val)),
	};
	int ret;

	xe_assert(xe, len <= 2);
	xe_assert(xe, len != 1 || !upper_32_bits(val));

	/* Self config must go over MMIO */
	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
{
	return guc_self_cfg(guc, key, 1, val);
}

int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
{
	return guc_self_cfg(guc, key, 2, val);
}

void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		xe_guc_ct_irq_handler(&guc->ct);
}

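/**
 * xe_guc_sanitize - reset the GuC software state
 * @guc: The GuC object
 *
 * Mark the firmware as loadable again, disable the CT channel and disable
 * submission, e.g. ahead of a reset or suspend.
 */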
void xe_guc_sanitize(struct xe_guc *guc)
{
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
	xe_guc_ct_disable(&guc->ct);
	guc->submission_state.enabled = false;
}

int xe_guc_reset_prepare(struct xe_guc *guc)
{
	return xe_guc_submit_reset_prepare(guc);
}

void xe_guc_reset_wait(struct xe_guc *guc)
{
	xe_guc_submit_reset_wait(guc);
}

void xe_guc_stop_prepare(struct xe_guc *guc)
{
	XE_WARN_ON(xe_guc_pc_stop(&guc->pc));
}

int xe_guc_stop(struct xe_guc *guc)
{
	int ret;

	xe_guc_ct_disable(&guc->ct);

	ret = xe_guc_submit_stop(guc);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_start(struct xe_guc *guc)
{
	int ret;

	ret = xe_guc_pc_start(&guc->pc);
	XE_WARN_ON(ret);

	return xe_guc_submit_start(guc);
}

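/**
 * xe_guc_print_info - print GuC state for debug
 * @guc: The GuC object
 * @p: the &drm_printer to print into
 *
 * Dump the firmware state, GUC_STATUS fields, scratch registers, CT state
 * and submission state.
 */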
void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int err;
	int i;

	xe_uc_fw_print(&guc->fw, p);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return;

	status = xe_mmio_read32(gt, GUC_STATUS);

	drm_printf(p, "\nGuC status 0x%08x:\n", status);
	drm_printf(p, "\tBootrom status = 0x%x\n",
		   REG_FIELD_GET(GS_BOOTROM_MASK, status));
	drm_printf(p, "\tuKernel status = 0x%x\n",
		   REG_FIELD_GET(GS_UKERNEL_MASK, status));
	drm_printf(p, "\tMIA Core status = 0x%x\n",
		   REG_FIELD_GET(GS_MIA_MASK, status));
	drm_printf(p, "\tLog level = %d\n",
		   xe_guc_log_get_level(&guc->log));

	drm_puts(p, "\nScratch registers:\n");
	for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
		drm_printf(p, "\t%2d: \t0x%x\n",
			   i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
	}

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);

	xe_guc_ct_print(&guc->ct, p, false);
	xe_guc_submit_print(guc, p);
}

/**
 * xe_guc_in_reset() - Detect if GuC MIA is in reset.
 * @guc: The GuC object
 *
 * This function detects a runtime resume from d3cold by leveraging
 * GUC_STATUS; the GuC does not get reset during d3hot. It is strictly
 * meant to be called from the runtime PM resume handler.
 *
 * Return: true if we failed to get forcewake or the GuC MIA is in reset,
 * false otherwise.
 */
bool xe_guc_in_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return true;

	status = xe_mmio_read32(gt, GUC_STATUS);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);

	return status & GS_MIA_IN_RESET;
}
917