// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc.h"

#include <drm/drm_managed.h>

#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc_ads.h"
#include "xe_guc_ct.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_sriov.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
			    struct xe_bo *bo)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 addr = xe_bo_ggtt_addr(bo);

	/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
	xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
	xe_assert(xe, addr < GUC_GGTT_TOP);
	xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);

	return addr;
}

static u32 guc_ctl_debug_flags(struct xe_guc *guc)
{
	u32 level = xe_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
	u32 flags = 0;

	if (!guc_to_xe(guc)->info.skip_guc_pc)
		flags |= GUC_CTL_ENABLE_SLPC;

	return flags;
}

static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
{
	u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
	u32 flags;

#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define LOG_UNIT SZ_1M
	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
#else
	#define LOG_UNIT SZ_4K
	#define LOG_FLAG 0
#endif

#if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
	#define CAPTURE_UNIT SZ_1M
	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
#else
	#define CAPTURE_UNIT SZ_4K
	#define CAPTURE_FLAG 0
#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
		     (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
		     (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
	BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
		     (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		CAPTURE_FLAG |
		LOG_FLAG |
		((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
		 GUC_LOG_CAPTURE_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

#undef LOG_UNIT
#undef LOG_FLAG
#undef CAPTURE_UNIT
#undef CAPTURE_FLAG

	return flags;
}
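
/*
 * Worked example of the size encoding above (illustrative only; the real
 * buffer sizes come from xe_guc_log.h): assuming CRASH_BUFFER_SIZE == SZ_8K,
 * 8K is not a multiple of SZ_1M, so LOG_UNIT is SZ_4K and the field
 * programmed at GUC_LOG_CRASH_SHIFT is SZ_8K / SZ_4K - 1 = 1. Were the
 * buffer a 1M multiple instead, the corresponding *_ALLOC_UNITS flag would
 * tell the GuC to interpret the same field in 1M units.
 */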

static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
	u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	if (XE_WA(gt, 22012773006))
		flags |= GUC_WA_POLLCS;

	if (XE_WA(gt, 14014475959))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	if (XE_WA(gt, 22011391025))
		flags |= GUC_WA_DUAL_QUEUE;

	/*
	 * Wa_22011802037: FIXME - there's more to be done than simply setting
	 * this flag: make sure each CS is stopped when preparing for GT reset
	 * and wait for pending MI_FW.
	 */
	if (GRAPHICS_VERx100(xe) < 1270)
		flags |= GUC_WA_PRE_PARSER;

	if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	if (XE_WA(gt, 18020744125) &&
	    !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;

	if (XE_WA(gt, 1509372804))
		flags |= GUC_WA_RENDER_RST_RC6_EXIT;

	if (XE_WA(gt, 14018913170))
		flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;

	return flags;
}

static u32 guc_ctl_devid(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);

	return (((u32)xe->info.devid) << 16) | xe->info.revid;
}
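
/*
 * For illustration (hypothetical IDs): with devid 0x56c0 and revid 0x01,
 * the value handed to the GuC is (0x56c0 << 16) | 0x01 == 0x56c00001,
 * i.e. the PCI device ID in the upper 16 bits and the revision in the
 * lower 16 bits.
 */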

static void guc_print_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_gt_dbg(gt, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

static void guc_init_params(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = 0;
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = 0;
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

static void guc_init_params_post_hwconfig(struct xe_guc *guc)
{
	u32 *params = guc->params;

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	guc_print_params(guc);
}

/*
 * Initialize the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_write_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int i;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
}
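
/*
 * Resulting scratch layout (a sketch, assuming GUC_CTL_MAX_DWORDS == 14 and
 * SOFT_SCRATCH_COUNT == 16, per the BUILD_BUG_ON in guc_print_params()):
 *
 *	SOFT_SCRATCH(0)     - zeroed before the load
 *	SOFT_SCRATCH(1..14) - the GUC_CTL_* parameter dwords
 *	SOFT_SCRATCH(15)    - early GuC-to-host messages, drained by
 *			      guc_handle_mmio_msg() below
 */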

static void guc_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc *guc = arg;
	struct xe_gt *gt = guc_to_gt(guc);

	xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_uc_fini_hw(&guc_to_gt(guc)->uc);
	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
}

/**
 * xe_guc_comm_init_early - early initialization of GuC communication
 * @guc: the &xe_guc to initialize
 *
 * Must be called prior to first MMIO communication with GuC firmware.
 */
void xe_guc_comm_init_early(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);

	if (xe_gt_is_media_type(gt))
		guc->notify_reg = MED_GUC_HOST_INTERRUPT;
	else
		guc->notify_reg = GUC_HOST_INTERRUPT;
}

static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
{
	struct xe_tile *tile = gt_to_tile(guc_to_gt(guc));
	struct xe_device *xe = guc_to_xe(guc);
	int ret;

	if (!IS_DGFX(guc_to_xe(guc)))
		return 0;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo);
	if (ret)
		return ret;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_init(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	guc->fw.type = XE_UC_FW_TYPE_GUC;
	ret = xe_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	if (!xe_uc_fw_is_enabled(&guc->fw))
		return 0;

	ret = xe_guc_log_init(&guc->log);
	if (ret)
		goto out;

	ret = xe_guc_ads_init(&guc->ads);
	if (ret)
		goto out;

	ret = xe_guc_ct_init(&guc->ct);
	if (ret)
		goto out;

	ret = xe_guc_relay_init(&guc->relay);
	if (ret)
		goto out;

	ret = drmm_add_action_or_reset(&xe->drm, guc_fini, guc);
	if (ret)
		goto out;

	guc_init_params(guc);

	xe_guc_comm_init_early(guc);

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret));
	return ret;
}

/**
 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
	int ret;

	ret = xe_guc_realloc_post_hwconfig(guc);
	if (ret)
		return ret;

	guc_init_params_post_hwconfig(guc);

	ret = xe_guc_pc_init(&guc->pc);
	if (ret)
		return ret;

	return xe_guc_ads_init_post_hwconfig(&guc->ads);
}

int xe_guc_post_load_init(struct xe_guc *guc)
{
	xe_guc_ads_populate_post_load(&guc->ads);
	guc->submission_state.enabled = true;

	return 0;
}

int xe_guc_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 guc_status, gdrst;
	int ret;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(gt, GDRST, GRDOM_GUC);

	ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
	if (ret) {
		xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
		goto err_out;
	}

	guc_status = xe_mmio_read32(gt, GUC_STATUS);
	if (!(guc_status & GS_MIA_IN_RESET)) {
		xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
			  guc_status);
		ret = -EIO;
		goto err_out;
	}

	return 0;

err_out:

	return ret;
}

static void guc_prepare_xfer(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = guc_to_xe(guc);
	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
		GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
		GUC_ENABLE_MIA_CLOCK_GATING;

	if (GRAPHICS_VERx100(xe) < 1250)
		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
				GUC_ENABLE_MIA_CACHING;

	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);

	/* Must program this register before loading the ucode with DMA */
	xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);

	xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
}

/*
 * The RSA key is handed to the GuC either by copying it into the
 * UOS_RSA_SCRATCH registers over MMIO (keys up to 256 bytes) or by
 * pointing the hardware at an in-memory copy (larger keys).
 */
static int guc_xfer_rsa(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	size_t copied;
	int i;

	if (guc->fw.rsa_size > 256) {
		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
				    xe_uc_fw_rsa_offset(&guc->fw);

		xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
		return 0;
	}

	copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
	if (copied < sizeof(rsa))
		return -ENOMEM;

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}
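
/*
 * Size split sketch (assuming UOS_RSA_SCRATCH_COUNT == 64): a 256-byte
 * (2048-bit) key is exactly 64 u32s and is copied register by register;
 * anything larger cannot fit, so only the GGTT address of the in-memory
 * key is written to UOS_RSA_SCRATCH(0) and the hardware fetches the key
 * from there.
 */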

static int guc_wait_ucode(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int ret;

	/*
	 * Wait for the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms
	 * (assuming the GT clock is at maximum frequency). So, a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver may decide to reset the GuC and
	 * attempt the ucode load again if this happens.)
	 *
	 * FIXME: There is a known (but exceedingly unlikely) race condition
	 * where the asynchronous frequency management code could reduce
	 * the GT clock while a GuC reload is in progress (during a full
	 * GT reset). A fix is in progress but there are complex locking
	 * issues to be resolved. In the meantime bump the timeout to
	 * 200ms. Even at slowest clock, this should be sufficient. And
	 * in the working case, a larger timeout makes no difference.
	 */
	ret = xe_mmio_wait32(gt, GUC_STATUS, GS_UKERNEL_MASK,
			     FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
			     200000, &status, false);

	if (ret) {
		xe_gt_info(gt, "GuC load failed: status = 0x%08X\n", status);
		xe_gt_info(gt, "GuC status: Reset = %u, BootROM = %#X, UKernel = %#X, MIA = %#X, Auth = %#X\n",
			   REG_FIELD_GET(GS_MIA_IN_RESET, status),
			   REG_FIELD_GET(GS_BOOTROM_MASK, status),
			   REG_FIELD_GET(GS_UKERNEL_MASK, status),
			   REG_FIELD_GET(GS_MIA_MASK, status),
			   REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));

		if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
			xe_gt_info(gt, "GuC firmware signature verification failed\n");
			ret = -ENOEXEC;
		}

		if (REG_FIELD_GET(GS_UKERNEL_MASK, status) ==
		    XE_GUC_LOAD_STATUS_EXCEPTION) {
			xe_gt_info(gt, "GuC firmware exception. EIP: %#x\n",
				   xe_mmio_read32(gt, SOFT_SCRATCH(13)));
			ret = -ENXIO;
		}
	} else {
		xe_gt_dbg(gt, "GuC successfully loaded\n");
	}

	return ret;
}

static int __xe_guc_upload(struct xe_guc *guc)
{
	int ret;

	guc_write_params(guc);
	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded separately, either by copying it to the UOS_RSA_SCRATCH
	 * register (if key size <= 256) or through a ggtt-pinned vma (if key
	 * size > 256). The RSA size and therefore the way we provide it to the
	 * HW is fixed for each platform and hard-coded in the bootrom.
	 */
	ret = guc_xfer_rsa(guc);
	if (ret)
		goto out;

	/*
	 * Current uCode expects the code to be loaded at 8k; locations below
	 * this are used for the stack.
	 */
	ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
	if (ret)
		goto out;

	/* Wait for authentication */
	ret = guc_wait_ucode(guc);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
	return 0;

out:
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
	return 0 /* FIXME: ret, don't want to stop load currently */;
}

/**
 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
 * @guc: The GuC object
 *
 * This function uploads a minimal GuC that does not support submissions but
 * is in a state where the hwconfig table can be read. Next, it reads and
 * parses the hwconfig table so it can be used for subsequent steps in the
 * driver load. Lastly, it enables CT communication (XXX: this is needed for
 * PFs/VFs only).
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	int ret;

	xe_guc_ads_populate_minimal(&guc->ads);

	/* Raise GT freq to speed up HuC/GuC load */
	xe_guc_pc_init_early(&guc->pc);

	ret = __xe_guc_upload(guc);
	if (ret)
		return ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_upload(struct xe_guc *guc)
{
	xe_guc_ads_populate(&guc->ads);

	return __xe_guc_upload(guc);
}

static void guc_handle_mmio_msg(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 msg;

	if (IS_SRIOV_VF(guc_to_xe(guc)))
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
	msg &= XE_GUC_RECV_MSG_EXCEPTION |
		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
	xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);

	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		xe_gt_err(gt, "Received early GuC crash dump notification!\n");

	if (msg & XE_GUC_RECV_MSG_EXCEPTION)
		xe_gt_err(gt, "Received early GuC exception notification!\n");
}

static void guc_enable_irq(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 events = xe_gt_is_media_type(gt) ?
		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	/* Primary GuC and media GuC share a single enable bit */
	xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));

	/*
	 * There are separate mask bits for primary and media GuCs, so use
	 * a RMW operation to avoid clobbering the other GuC's setting.
	 */
	xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
}
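
/*
 * For reference, the RMW above is roughly equivalent to the following
 * (a sketch of what xe_mmio_rmw32(gt, reg, clr, set) does):
 *
 *	u32 val = xe_mmio_read32(gt, reg);
 *
 *	val &= ~clr;
 *	val |= set;
 *	xe_mmio_write32(gt, reg, val);
 *
 * so passing (events, 0) clears only this GuC's mask bits and leaves the
 * other GuC's bits untouched.
 */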

int xe_guc_enable_communication(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	int err;

	guc_enable_irq(guc);

	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
		struct xe_gt *gt = guc_to_gt(guc);
		struct xe_tile *tile = gt_to_tile(gt);

		err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
		if (err)
			return err;
	}

	xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
		      ARAT_EXPIRED_INTRMSK, 0);

	err = xe_guc_ct_enable(&guc->ct);
	if (err)
		return err;

	guc_handle_mmio_msg(guc);

	return 0;
}

int xe_guc_suspend(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 action[] = {
		XE_GUC_ACTION_CLIENT_SOFT_RESET,
	};
	int ret;

	ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
	if (ret) {
		xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret));
		return ret;
	}

	xe_guc_sanitize(guc);
	return 0;
}

void xe_guc_notify(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	const u32 default_notify_data = 0;

	/*
	 * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
	 * additional payload data to the GuC but this capability is not
	 * used by the firmware yet. Use default value in the meantime.
	 */
	xe_mmio_write32(gt, guc->notify_reg, default_notify_data);
}

int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
{
	u32 action[] = {
		XE_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_addr
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
			  u32 len, u32 *response_buf)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 header, reply;
	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
	const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
	int ret;
	int i;

	BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);

	xe_assert(xe, !xe_guc_ct_enabled(&guc->ct));
	xe_assert(xe, len);
	xe_assert(xe, len <= VF_SW_FLAG_COUNT);
	xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
		  GUC_HXG_ORIGIN_HOST);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
		  GUC_HXG_TYPE_REQUEST);

retry:
	/* Not in critical data-path, just do if else for GT type */
	if (xe_gt_is_media_type(gt)) {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
	} else {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(gt, VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
	}

	xe_guc_notify(guc);

	ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
			     50000, &reply, false);
	if (ret) {
timeout:
		xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
			  request[0], reply);
		return ret;
	}

	header = xe_mmio_read32(gt, reply_reg);
	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
		/*
		 * Once we've received a BUSY reply we must wait again for the
		 * final response, but this time we can't use the ORIGIN mask
		 * anymore. To spot the right change in the reply, we take
		 * advantage of the fact that the SUCCESS and FAILURE response
		 * types differ only by a single bit while all their other
		 * bits are set, so those other bits can be used as a new mask.
		 */
		u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
		u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);

		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);

		ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
				     1000000, &header, false);

		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
			     GUC_HXG_ORIGIN_GUC))
			goto proto;
		if (unlikely(ret)) {
			if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
			    GUC_HXG_TYPE_NO_RESPONSE_BUSY)
				goto proto;
			goto timeout;
		}
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		xe_gt_dbg(gt, "GuC mmio request %#x: retrying, reason %#x\n",
			  request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		xe_gt_err(gt, "GuC mmio request %#x: failure %#x hint %#x\n",
			  request[0], error, hint);
		return -ENXIO;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
	    GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		xe_gt_err(gt, "GuC mmio request %#x: unexpected reply %#x\n",
			  request[0], header);
		return -EPROTO;
	}

	/* Just copy entire possible message response */
	if (response_buf) {
		response_buf[0] = header;

		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
			reply_reg.addr += sizeof(u32);
			response_buf[i] = xe_mmio_read32(gt, reply_reg);
		}
	}

	/* Use data from the GuC response as our return value */
	return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
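
/*
 * A minimal caller sketch (illustrative; guc_self_cfg() below is the
 * in-tree reference). A single-dword HXG request carries the ORIGIN,
 * TYPE and ACTION fields in dword 0:
 *
 *	u32 request[] = {
 *		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
 *		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
 *		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, action),
 *	};
 *
 *	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
 */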

int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
	return xe_guc_mmio_send_recv(guc, request, len, NULL);
}

static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
			   lower_32_bits(val)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
			   upper_32_bits(val)),
	};
	int ret;

	xe_assert(xe, len <= 2);
	xe_assert(xe, len != 1 || !upper_32_bits(val));

	/* Self config must go over MMIO */
	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
{
	return guc_self_cfg(guc, key, 1, val);
}

int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
{
	return guc_self_cfg(guc, key, 2, val);
}
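
/*
 * Usage sketch (the KLV key below is hypothetical, purely for
 * illustration; real keys live in the GuC KLV ABI headers):
 *
 *	err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_EXAMPLE_KEY, addr);
 *	if (err)
 *		return err;
 *
 * Note that a 32-bit KLV must not carry upper bits: guc_self_cfg()
 * asserts that len == 1 implies upper_32_bits(val) == 0.
 */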

void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		xe_guc_ct_irq_handler(&guc->ct);
}

void xe_guc_sanitize(struct xe_guc *guc)
{
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
	xe_guc_ct_disable(&guc->ct);
	guc->submission_state.enabled = false;
}

int xe_guc_reset_prepare(struct xe_guc *guc)
{
	return xe_guc_submit_reset_prepare(guc);
}

void xe_guc_reset_wait(struct xe_guc *guc)
{
	xe_guc_submit_reset_wait(guc);
}

void xe_guc_stop_prepare(struct xe_guc *guc)
{
	XE_WARN_ON(xe_guc_pc_stop(&guc->pc));
}

int xe_guc_stop(struct xe_guc *guc)
{
	int ret;

	xe_guc_ct_stop(&guc->ct);

	ret = xe_guc_submit_stop(guc);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_start(struct xe_guc *guc)
{
	int ret;

	ret = xe_guc_pc_start(&guc->pc);
	XE_WARN_ON(ret);

	return xe_guc_submit_start(guc);
}

void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int err;
	int i;

	xe_uc_fw_print(&guc->fw, p);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return;

	status = xe_mmio_read32(gt, GUC_STATUS);

	drm_printf(p, "\nGuC status 0x%08x:\n", status);
	drm_printf(p, "\tBootrom status = 0x%x\n",
		   REG_FIELD_GET(GS_BOOTROM_MASK, status));
	drm_printf(p, "\tuKernel status = 0x%x\n",
		   REG_FIELD_GET(GS_UKERNEL_MASK, status));
	drm_printf(p, "\tMIA Core status = 0x%x\n",
		   REG_FIELD_GET(GS_MIA_MASK, status));
	drm_printf(p, "\tLog level = %d\n",
		   xe_guc_log_get_level(&guc->log));

	drm_puts(p, "\nScratch registers:\n");
	for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
		drm_printf(p, "\t%2d: \t0x%x\n",
			   i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
	}

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);

	xe_guc_ct_print(&guc->ct, p, false);
	xe_guc_submit_print(guc, p);
}

/**
 * xe_guc_in_reset() - Detect if GuC MIA is in reset.
 * @guc: The GuC object
 *
 * This function detects runtime resume from d3cold by leveraging
 * GUC_STATUS; the GuC doesn't get reset during d3hot. It is strictly
 * meant to be called from the RPM resume handler.
 *
 * Return: true if we failed to get forcewake or the GuC MIA is in reset,
 * false otherwise.
 */
bool xe_guc_in_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return true;

	status = xe_mmio_read32(gt, GUC_STATUS);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);

	return status & GS_MIA_IN_RESET;
}
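
/*
 * Usage sketch (illustrative only): a runtime-resume handler could use
 * this to decide whether the firmware must be reloaded after d3cold:
 *
 *	if (xe_guc_in_reset(guc)) {
 *		... take the full GT reset / GuC reload path ...
 *	}
 */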