1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2016-2019 Intel Corporation
4 */
5
6 #include <linux/bitfield.h>
7 #include <linux/firmware.h>
8 #include <linux/highmem.h>
9
10 #include <drm/drm_cache.h>
11 #include <drm/drm_print.h>
12
13 #include "gem/i915_gem_lmem.h"
14 #include "gt/intel_gt.h"
15 #include "gt/intel_gt_print.h"
16 #include "intel_gsc_binary_headers.h"
17 #include "intel_gsc_fw.h"
18 #include "intel_uc_fw.h"
19 #include "intel_uc_fw_abi.h"
20 #include "i915_drv.h"
21 #include "i915_reg.h"
22
/*
 * Severity of "unexpected but non-fatal" firmware messages: on debug
 * builds they are escalated to probe errors so they are hard to miss;
 * on production builds they are only a notice to the user.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define UNEXPECTED	gt_probe_error
#else
#define UNEXPECTED	gt_notice
#endif
28
/*
 * Map a struct intel_uc_fw back to its owning GT. Each uc_fw is embedded
 * in intel_gt.uc (guc/huc/gsc member depending on @type), so the GT can
 * be recovered with container_of() once the type is known.
 */
static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
	GEM_BUG_ON(type >= INTEL_UC_FW_NUM_TYPES);

	switch (type) {
	case INTEL_UC_FW_TYPE_GUC:
		return container_of(uc_fw, struct intel_gt, uc.guc.fw);
	case INTEL_UC_FW_TYPE_HUC:
		return container_of(uc_fw, struct intel_gt, uc.huc.fw);
	case INTEL_UC_FW_TYPE_GSC:
		return container_of(uc_fw, struct intel_gt, uc.gsc.fw);
	}

	/* Unreachable: GEM_BUG_ON above rejects out-of-range types */
	return NULL;
}
45
/* As ____uc_fw_to_gt(), taking the type from an already-initialized uc_fw */
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}
51
#ifdef CONFIG_DRM_I915_DEBUG_GUC
/*
 * Debug-only status setter: record the new state and log every
 * transition. For SELECTED the chosen firmware path is logged instead of
 * the status name, since that is the more useful piece of information.
 */
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
			       enum intel_uc_fw_status status)
{
	uc_fw->__status = status;
	gt_dbg(__uc_fw_to_gt(uc_fw), "%s firmware -> %s\n",
	       intel_uc_fw_type_repr(uc_fw->type),
	       status == INTEL_UC_FIRMWARE_SELECTED ?
	       uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
}
#endif
63
64 /*
65 * List of required GuC and HuC binaries per-platform.
66 * Must be ordered based on platform + revid, from newer to older.
67 *
68 * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
69 * firmware as TGL.
70 *
71 * Version numbers:
 * Originally, the driver required an exact match major/minor/patch firmware
73 * file and only supported that one version for any given platform. However,
74 * the new direction from upstream is to be backwards compatible with all
75 * prior releases and to be as flexible as possible as to what firmware is
76 * loaded.
77 *
78 * For GuC, the major version number signifies a backwards breaking API change.
79 * So, new format GuC firmware files are labelled by their major version only.
80 * For HuC, there is no KMD interaction, hence no version matching requirement.
81 * So, new format HuC firmware files have no version number at all.
82 *
83 * All of which means that the table below must keep all old format files with
84 * full three point version number. But newer files have reduced requirements.
85 * Having said that, the driver still needs to track the minor version number
86 * for GuC at least. As it is useful to report to the user that they are not
87 * running with a recent enough version for all KMD supported features,
88 * security fixes, etc. to be enabled.
89 */
/* GuC table: guc_maj() entries are version-flexible, guc_mmp() are legacy exact-match files */
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
	fw_def(METEORLAKE, 0, guc_maj(mtl, 70, 6, 6)) \
	fw_def(DG2, 0, guc_maj(dg2, 70, 5, 1)) \
	fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5, 1)) \
	fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
	fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
	fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5, 1)) \
	fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
	fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
	fw_def(DG1, 0, guc_maj(dg1, 70, 5, 1)) \
	fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
	fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
	fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
	fw_def(ELKHARTLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
	fw_def(ICELAKE, 0, guc_mmp(icl, 70, 1, 1)) \
	fw_def(COMETLAKE, 5, guc_mmp(cml, 70, 1, 1)) \
	fw_def(COMETLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
	fw_def(COFFEELAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
	fw_def(GEMINILAKE, 0, guc_mmp(glk, 70, 1, 1)) \
	fw_def(KABYLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
	fw_def(BROXTON, 0, guc_mmp(bxt, 70, 1, 1)) \
	fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1))

/* HuC table: huc_raw() files are versionless, huc_gsc() are GSC-wrapped, huc_mmp() are legacy */
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp, huc_gsc) \
	fw_def(METEORLAKE, 0, huc_gsc(mtl)) \
	fw_def(DG2, 0, huc_gsc(dg2)) \
	fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \
	fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \
	fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \
	fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \
	fw_def(DG1, 0, huc_raw(dg1)) \
	fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
	fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
	fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
	fw_def(ELKHARTLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
	fw_def(ICELAKE, 0, huc_mmp(icl, 9, 0, 0)) \
	fw_def(COMETLAKE, 5, huc_mmp(cml, 4, 0, 0)) \
	fw_def(COMETLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
	fw_def(COFFEELAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
	fw_def(GEMINILAKE, 0, huc_mmp(glk, 4, 0, 0)) \
	fw_def(KABYLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
	fw_def(BROXTON, 0, huc_mmp(bxt, 2, 0, 0)) \
	fw_def(SKYLAKE, 0, huc_mmp(skl, 2, 0, 0))
133
134 /*
135 * Set of macros for producing a list of filenames from the above table.
136 */
/* e.g. "i915/mtl_huc.bin" */
#define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
	"i915/" \
	__stringify(prefix_) "_" name_ ".bin"

/* e.g. "i915/adlp_guc_70.bin" */
#define __MAKE_UC_FW_PATH_MAJOR(prefix_, name_, major_) \
	"i915/" \
	__stringify(prefix_) "_" name_ "_" \
	__stringify(major_) ".bin"

/* e.g. "i915/tgl_huc_7.9.3.bin" */
#define __MAKE_UC_FW_PATH_MMP(prefix_, name_, major_, minor_, patch_) \
	"i915/" \
	__stringify(prefix_) "_" name_ "_" \
	__stringify(major_) "." \
	__stringify(minor_) "." \
	__stringify(patch_) ".bin"

/* Minor for internal driver use, not part of file name */
#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH_MAJOR(prefix_, "guc", major_)

#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH_MMP(prefix_, "guc", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
	__MAKE_UC_FW_PATH_BLANK(prefix_, "huc")

#define MAKE_HUC_FW_PATH_GSC(prefix_) \
	__MAKE_UC_FW_PATH_BLANK(prefix_, "huc_gsc")

#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
	__MAKE_UC_FW_PATH_MMP(prefix_, "huc", major_, minor_, patch_)

/*
 * All blobs need to be declared via MODULE_FIRMWARE().
 * This first expansion of the table macros is solely to provide
 * that declaration.
 */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
	MODULE_FIRMWARE(uc_);

INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH_MAJOR, MAKE_GUC_FW_PATH_MMP)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP, MAKE_HUC_FW_PATH_GSC)
179
180 /*
181 * The next expansion of the table macros (in __uc_fw_auto_select below) provides
182 * actual data structures with both the filename and the version information.
183 * These structure arrays are then iterated over to the list of suitable files
184 * for the current platform and to then attempt to load those files, in the order
185 * listed, until one is successfully found.
186 */
/* One firmware-file candidate: where to find it and what version it carries */
struct __packed uc_fw_blob {
	const char *path;	/* name handed to the firmware loader */
	bool legacy;		/* exact-match (MMP-named) file from the old scheme */
	u8 major;		/* 0 for versionless (new-scheme HuC) files */
	u8 minor;
	u8 patch;
	bool has_gsc_headers;	/* binary is wrapped in GSC headers, not bare CSS */
};

#define UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
	.major = major_, \
	.minor = minor_, \
	.patch = patch_, \
	.path = path_,

/* Blob following the new, version-flexible naming scheme */
#define UC_FW_BLOB_NEW(major_, minor_, patch_, gsc_, path_) \
	{ UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
	  .legacy = false, .has_gsc_headers = gsc_ }

/* Blob following the old exact major.minor.patch naming scheme */
#define UC_FW_BLOB_OLD(major_, minor_, patch_, path_) \
	{ UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
	  .legacy = true }

#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB_NEW(major_, minor_, patch_, false, \
		       MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_, patch_))

#define GUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB_OLD(major_, minor_, patch_, \
		       MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB(prefix_) \
	UC_FW_BLOB_NEW(0, 0, 0, false, MAKE_HUC_FW_PATH_BLANK(prefix_))

#define HUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
	UC_FW_BLOB_OLD(major_, minor_, patch_, \
		       MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))

#define HUC_FW_BLOB_GSC(prefix_) \
	UC_FW_BLOB_NEW(0, 0, 0, true, MAKE_HUC_FW_PATH_GSC(prefix_))

/* A table row: the first platform/stepping a given blob applies to */
struct __packed uc_fw_platform_requirement {
	enum intel_platform p;
	u8 rev; /* first platform rev using this FW */
	const struct uc_fw_blob blob;
};

#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
	.p = INTEL_##platform_, \
	.rev = revid_, \
	.blob = uc_, \
},

struct fw_blobs_by_type {
	const struct uc_fw_platform_requirement *blobs;
	u32 count;
};

/* Per-type candidate tables; GSC has no table yet (see __uc_fw_auto_select()) */
static const struct uc_fw_platform_requirement blobs_guc[] = {
	INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
};

static const struct uc_fw_platform_requirement blobs_huc[] = {
	INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP, HUC_FW_BLOB_GSC)
};

static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
	[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
	[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
258
/*
 * Pick the firmware file for @uc_fw from the static per-platform tables.
 * On the first call this selects the newest matching blob; when called
 * again after a load failure (file_selected.path still set), the search
 * resumes past the previously chosen entry and either selects the next
 * candidate or clears file_selected.path when none remain.
 */
static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const struct uc_fw_platform_requirement *fw_blobs;
	enum intel_platform p = INTEL_INFO(i915)->platform;
	u32 fw_count;
	u8 rev = INTEL_REVID(i915);
	int i;
	bool found;

	/*
	 * GSC FW support is still not fully in place, so we're not defining
	 * the FW blob yet because we don't want the driver to attempt to load
	 * it until we're ready for it.
	 */
	if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
		return;

	/*
	 * The only difference between the ADL GuC FWs is the HWConfig support.
	 * ADL-N does not support HWConfig, so we should use the same binary as
	 * ADL-S, otherwise the GuC might attempt to fetch a config table that
	 * does not exist.
	 */
	if (IS_ALDERLAKE_P_N(i915))
		p = INTEL_ALDERLAKE_S;

	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
	fw_blobs = blobs_all[uc_fw->type].blobs;
	fw_count = blobs_all[uc_fw->type].count;

	found = false;
	/* Table is newest-first, so stop once entries fall below our platform */
	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
		const struct uc_fw_blob *blob = &fw_blobs[i].blob;

		if (p != fw_blobs[i].p)
			continue;

		/* Entry only applies from a later stepping than ours */
		if (rev < fw_blobs[i].rev)
			continue;

		if (uc_fw->file_selected.path) {
			/*
			 * Continuing an earlier search after a found blob failed to load.
			 * Once the previously chosen path has been found, clear it out
			 * and let the search continue from there.
			 */
			if (uc_fw->file_selected.path == blob->path)
				uc_fw->file_selected.path = NULL;

			continue;
		}

		uc_fw->file_selected.path = blob->path;
		uc_fw->file_wanted.path = blob->path;
		uc_fw->file_wanted.ver.major = blob->major;
		uc_fw->file_wanted.ver.minor = blob->minor;
		uc_fw->file_wanted.ver.patch = blob->patch;
		uc_fw->has_gsc_headers = blob->has_gsc_headers;
		found = true;
		break;
	}

	if (!found && uc_fw->file_selected.path) {
		/* Failed to find a match for the last attempt?! */
		uc_fw->file_selected.path = NULL;
	}
}
327
/*
 * Sanity-check the static firmware table for @type: entries must be
 * sorted newest-to-oldest by platform, revision, major, minor and patch,
 * with version-flexible entries ahead of their legacy equivalents.
 * Ordering violations fail validation (return false); duplicate paths
 * are only reported (see NOTE below).
 */
static bool validate_fw_table_type(struct drm_i915_private *i915, enum intel_uc_fw_type type)
{
	const struct uc_fw_platform_requirement *fw_blobs;
	u32 fw_count;
	int i, j;

	if (type >= ARRAY_SIZE(blobs_all)) {
		drm_err(&i915->drm, "No blob array for %s\n", intel_uc_fw_type_repr(type));
		return false;
	}

	fw_blobs = blobs_all[type].blobs;
	fw_count = blobs_all[type].count;

	if (!fw_count)
		return true;

	/* make sure the list is ordered as expected */
	for (i = 1; i < fw_count; i++) {
		/* Versionless file names must be unique per platform: */
		for (j = i + 1; j < fw_count; j++) {
			/* Same platform? */
			if (fw_blobs[i].p != fw_blobs[j].p)
				continue;

			/*
			 * Pointer comparison: identical path literals built by
			 * the table macros are expected to be merged by the
			 * compiler within this translation unit —
			 * NOTE(review): confirm this deduplication assumption.
			 */
			if (fw_blobs[i].blob.path != fw_blobs[j].blob.path)
				continue;

			/* NOTE(review): duplicates are only logged, they do not fail validation */
			drm_err(&i915->drm, "Duplicate %s blobs: %s r%u %s%d.%d.%d [%s] matches %s%d.%d.%d [%s]\n",
				intel_uc_fw_type_repr(type),
				intel_platform_name(fw_blobs[j].p), fw_blobs[j].rev,
				fw_blobs[j].blob.legacy ? "L" : "v",
				fw_blobs[j].blob.major, fw_blobs[j].blob.minor,
				fw_blobs[j].blob.patch, fw_blobs[j].blob.path,
				fw_blobs[i].blob.legacy ? "L" : "v",
				fw_blobs[i].blob.major, fw_blobs[i].blob.minor,
				fw_blobs[i].blob.patch, fw_blobs[i].blob.path);
		}

		/* Next platform is good: */
		if (fw_blobs[i].p < fw_blobs[i - 1].p)
			continue;

		/* Next platform revision is good: */
		if (fw_blobs[i].p == fw_blobs[i - 1].p &&
		    fw_blobs[i].rev < fw_blobs[i - 1].rev)
			continue;

		/* Platform/revision must be in order: */
		if (fw_blobs[i].p != fw_blobs[i - 1].p ||
		    fw_blobs[i].rev != fw_blobs[i - 1].rev)
			goto bad;

		/* Next major version is good: */
		if (fw_blobs[i].blob.major < fw_blobs[i - 1].blob.major)
			continue;

		/* New must be before legacy: */
		if (!fw_blobs[i].blob.legacy && fw_blobs[i - 1].blob.legacy)
			goto bad;

		/* New to legacy also means 0.0 to X.Y (HuC), or X.0 to X.Y (GuC) */
		if (fw_blobs[i].blob.legacy && !fw_blobs[i - 1].blob.legacy) {
			if (!fw_blobs[i - 1].blob.major)
				continue;

			if (fw_blobs[i].blob.major == fw_blobs[i - 1].blob.major)
				continue;
		}

		/* Major versions must be in order: */
		if (fw_blobs[i].blob.major != fw_blobs[i - 1].blob.major)
			goto bad;

		/* Next minor version is good: */
		if (fw_blobs[i].blob.minor < fw_blobs[i - 1].blob.minor)
			continue;

		/* Minor versions must be in order: */
		if (fw_blobs[i].blob.minor != fw_blobs[i - 1].blob.minor)
			goto bad;

		/* Patch versions must be in order and unique: */
		if (fw_blobs[i].blob.patch < fw_blobs[i - 1].blob.patch)
			continue;

bad:
		drm_err(&i915->drm, "Invalid %s blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
			intel_uc_fw_type_repr(type),
			intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
			fw_blobs[i - 1].blob.legacy ? "L" : "v",
			fw_blobs[i - 1].blob.major,
			fw_blobs[i - 1].blob.minor,
			fw_blobs[i - 1].blob.patch,
			intel_platform_name(fw_blobs[i].p), fw_blobs[i].rev,
			fw_blobs[i].blob.legacy ? "L" : "v",
			fw_blobs[i].blob.major,
			fw_blobs[i].blob.minor,
			fw_blobs[i].blob.patch);
		return false;
	}

	return true;
}
432
__override_guc_firmware_path(struct drm_i915_private * i915)433 static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
434 {
435 if (i915->params.enable_guc & ENABLE_GUC_MASK)
436 return i915->params.guc_firmware_path;
437 return "";
438 }
439
__override_huc_firmware_path(struct drm_i915_private * i915)440 static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
441 {
442 if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
443 return i915->params.huc_firmware_path;
444 return "";
445 }
446
/* GSC path override is not gated on enable_guc, only on the modparam itself */
static const char *__override_gsc_firmware_path(struct drm_i915_private *i915)
{
	return i915->params.gsc_firmware_path;
}
451
/*
 * Apply a modparam-supplied firmware path override, if any. The per-type
 * helpers return either a user-supplied path, an empty string (the uC is
 * disabled via modparam — mapped to DISABLED by intel_uc_fw_init_early()),
 * or NULL (presumably when no override was given, leaving auto-selection
 * in effect — confirm against the modparam defaults in i915_params).
 */
static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
	const char *path = NULL;

	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_GUC:
		path = __override_guc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_HUC:
		path = __override_huc_firmware_path(i915);
		break;
	case INTEL_UC_FW_TYPE_GSC:
		path = __override_gsc_firmware_path(i915);
		break;
	}

	/* Any non-NULL result, including "", counts as a user decision */
	if (unlikely(path)) {
		uc_fw->file_selected.path = path;
		uc_fw->user_overridden = true;
	}
}
473
/**
 * intel_uc_fw_version_from_gsc_manifest - extract a FW version from a GSC manifest
 * @ver: version structure to fill in
 * @data: pointer to the start of a struct intel_gsc_manifest_header
 *
 * Copies the version numbers out of the manifest header; the manifest's
 * "hotfix" field is what the driver tracks as "patch".
 */
void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver,
					   const void *data)
{
	const struct intel_gsc_manifest_header *manifest = data;

	ver->major = manifest->fw_version.major;
	ver->minor = manifest->fw_version.minor;
	ver->patch = manifest->fw_version.hotfix;
	ver->build = manifest->fw_version.build;
}
484
485 /**
486 * intel_uc_fw_init_early - initialize the uC object and select the firmware
487 * @uc_fw: uC firmware
488 * @type: type of uC
489 * @needs_ggtt_mapping: whether the FW needs to be GGTT mapped for loading
490 *
491 * Initialize the state of our uC object and relevant tracking and select the
492 * firmware to fetch and load.
493 */
intel_uc_fw_init_early(struct intel_uc_fw * uc_fw,enum intel_uc_fw_type type,bool needs_ggtt_mapping)494 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
495 enum intel_uc_fw_type type,
496 bool needs_ggtt_mapping)
497 {
498 struct intel_gt *gt = ____uc_fw_to_gt(uc_fw, type);
499 struct drm_i915_private *i915 = gt->i915;
500
501 /*
502 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
503 * before we're looked at the HW caps to see if we have uc support
504 */
505 BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
506 GEM_BUG_ON(uc_fw->status);
507 GEM_BUG_ON(uc_fw->file_selected.path);
508
509 uc_fw->type = type;
510 uc_fw->needs_ggtt_mapping = needs_ggtt_mapping;
511
512 if (HAS_GT_UC(i915)) {
513 if (!validate_fw_table_type(i915, type)) {
514 gt->uc.fw_table_invalid = true;
515 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
516 return;
517 }
518
519 __uc_fw_auto_select(i915, uc_fw);
520 __uc_fw_user_override(i915, uc_fw);
521 }
522
523 intel_uc_fw_change_status(uc_fw, uc_fw->file_selected.path ? *uc_fw->file_selected.path ?
524 INTEL_UC_FIRMWARE_SELECTED :
525 INTEL_UC_FIRMWARE_DISABLED :
526 INTEL_UC_FIRMWARE_NOT_SUPPORTED);
527 }
528
/*
 * Fault-injection helper: corrupt the selected path or wanted version in
 * one of several ways to exercise the fetch error paths. Each
 * i915_inject_probe_error() call is a distinct injection point, so at
 * most one branch fires per invocation; in builds without fault
 * injection this is presumably a no-op (confirm i915_inject_probe_error
 * semantics in i915_drv/i915_utils).
 */
static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
	/* -EINVAL is used for the "user override" injection flavour */
	bool user = e == -EINVAL;

	if (i915_inject_probe_error(i915, e)) {
		/* non-existing blob */
		uc_fw->file_selected.path = "<invalid>";
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next major version */
		uc_fw->file_wanted.ver.major += 1;
		uc_fw->file_wanted.ver.minor = 0;
		uc_fw->user_overridden = user;
	} else if (i915_inject_probe_error(i915, e)) {
		/* require next minor version */
		uc_fw->file_wanted.ver.minor += 1;
		uc_fw->user_overridden = user;
	} else if (uc_fw->file_wanted.ver.major &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev major version */
		uc_fw->file_wanted.ver.major -= 1;
		uc_fw->file_wanted.ver.minor = 0;
		uc_fw->user_overridden = user;
	} else if (uc_fw->file_wanted.ver.minor &&
		   i915_inject_probe_error(i915, e)) {
		/* require prev minor version - hey, this should work! */
		uc_fw->file_wanted.ver.minor -= 1;
		uc_fw->user_overridden = user;
	} else if (user && i915_inject_probe_error(i915, e)) {
		/* officially unsupported platform */
		uc_fw->file_wanted.ver.major = 0;
		uc_fw->file_wanted.ver.minor = 0;
		uc_fw->user_overridden = true;
	}
}
565
/* Decode a packed major.minor.patch version dword from a CSS header field */
static void uc_unpack_css_version(struct intel_uc_fw_ver *ver, u32 css_value)
{
	/* Get version numbers from the CSS header */
	ver->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css_value);
	ver->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css_value);
	ver->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css_value);
}
573
/*
 * Derive the GuC submission API version — from the CSS header on new
 * enough firmware, or from the known per-release mapping for older ones —
 * and record the CSS-reported private data size for later use.
 */
static void guc_read_css_info(struct intel_uc_fw *uc_fw, struct uc_css_header *css)
{
	struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);

	/*
	 * The GuC firmware includes an extra version number to specify the
	 * submission API level. This allows submission code to work with
	 * multiple GuC versions without having to know the absolute firmware
	 * version number (there are likely to be multiple firmware releases
	 * which all support the same submission API level).
	 *
	 * Note that the spec for the CSS header defines this version number
	 * as 'vf_version' as it was originally intended for virtualisation.
	 * However, it is applicable to native submission as well.
	 *
	 * Unfortunately, due to an oversight, this version number was only
	 * exposed in the CSS header from v70.6.0.
	 */
	if (uc_fw->file_selected.ver.major >= 70) {
		if (uc_fw->file_selected.ver.minor >= 6) {
			/* v70.6.0 adds CSS header support */
			uc_unpack_css_version(&guc->submission_version, css->vf_version);
		} else if (uc_fw->file_selected.ver.minor >= 3) {
			/* v70.3.0 introduced v1.1.0 */
			guc->submission_version.major = 1;
			guc->submission_version.minor = 1;
			guc->submission_version.patch = 0;
		} else {
			/* v70.0.0 introduced v1.0.0 */
			guc->submission_version.major = 1;
			guc->submission_version.minor = 0;
			guc->submission_version.patch = 0;
		}
	} else if (uc_fw->file_selected.ver.major >= 69) {
		/* v69.0.0 introduced v0.10.0 */
		guc->submission_version.major = 0;
		guc->submission_version.minor = 10;
		guc->submission_version.patch = 0;
	} else {
		/* Prior versions were v0.1.0 */
		guc->submission_version.major = 0;
		guc->submission_version.minor = 1;
		guc->submission_version.patch = 0;
	}

	uc_fw->private_data_size = css->private_data_size;
}
621
/*
 * Validate the CSS header of a firmware blob and cache the sizes and
 * version the rest of the load path needs (ucode size, RSA key size,
 * on-file FW version; GuC additionally parses submission info).
 *
 * Returns 0 on success or a negative errno for malformed or oversized
 * blobs.
 */
static int __check_ccs_header(struct intel_gt *gt,
			      const void *fw_data, size_t fw_size,
			      struct intel_uc_fw *uc_fw)
{
	struct uc_css_header *css;
	size_t size;

	/* Check the size of the blob before examining buffer contents */
	if (unlikely(fw_size < sizeof(struct uc_css_header))) {
		gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
			fw_size, sizeof(struct uc_css_header));
		return -ENODATA;
	}

	css = (struct uc_css_header *)fw_data;

	/* Check integrity of size values inside CSS header */
	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
		css->exponent_size_dw) * sizeof(u32);
	if (unlikely(size != sizeof(struct uc_css_header))) {
		/*
		 * Report the bogus computed header size (the value actually
		 * being compared), not the overall file size.
		 */
		gt_warn(gt, "%s firmware %s: unexpected header size: %zu != %zu\n",
			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
			size, sizeof(struct uc_css_header));
		return -EPROTO;
	}

	/* uCode size must be calculated from other sizes */
	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* At least, it should have header, uCode and RSA. Size of all three. */
	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
	if (unlikely(fw_size < size)) {
		gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
			fw_size, size);
		return -ENOEXEC;
	}

	/* Sanity check whether this fw is not larger than whole WOPCM memory */
	size = __intel_uc_fw_get_upload_size(uc_fw);
	if (unlikely(size >= gt->wopcm.size)) {
		gt_warn(gt, "%s firmware %s: invalid size: %zu > %zu\n",
			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
			size, (size_t)gt->wopcm.size);
		return -E2BIG;
	}

	uc_unpack_css_version(&uc_fw->file_selected.ver, css->sw_version);

	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
		guc_read_css_info(uc_fw, css);

	return 0;
}
680
/*
 * Parse version/layout info out of a firmware binary that carries GSC
 * headers. The per-type helper fills in uc_fw from the binary; when the
 * binary also contains a legacy CSS section (dma_start_offset set by the
 * helper), that section is validated as well.
 */
static int check_gsc_manifest(struct intel_gt *gt,
			      const struct firmware *fw,
			      struct intel_uc_fw *uc_fw)
{
	switch (uc_fw->type) {
	case INTEL_UC_FW_TYPE_HUC:
		intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
		break;
	case INTEL_UC_FW_TYPE_GSC:
		intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
		break;
	default:
		MISSING_CASE(uc_fw->type);
		return -EINVAL;
	}

	if (uc_fw->dma_start_offset) {
		u32 delta = uc_fw->dma_start_offset;

		/*
		 * NOTE(review): the return value of __check_ccs_header() is
		 * ignored here, so a malformed embedded CSS section does not
		 * fail the fetch — confirm whether that is intentional.
		 */
		__check_ccs_header(gt, fw->data + delta, fw->size - delta, uc_fw);
	}

	return 0;
}
705
/* Validate a legacy (CSS-only) firmware blob, whose header starts at offset 0 */
static int check_ccs_header(struct intel_gt *gt,
			    const struct firmware *fw,
			    struct intel_uc_fw *uc_fw)
{
	return __check_ccs_header(gt, fw->data, fw->size, uc_fw);
}
712
is_ver_8bit(struct intel_uc_fw_ver * ver)713 static bool is_ver_8bit(struct intel_uc_fw_ver *ver)
714 {
715 return ver->major < 0xFF && ver->minor < 0xFF && ver->patch < 0xFF;
716 }
717
/*
 * Reject GuC firmware whose file or submission version does not fit in
 * 8-bit components. Returns 0 on success (unless fault injection forces
 * an -EINVAL at the end), -EINVAL on an out-of-range version.
 */
static int guc_check_version_range(struct intel_uc_fw *uc_fw)
{
	struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);

	/*
	 * GuC version number components are defined as being 8-bits.
	 * The submission code relies on this to optimise version comparison
	 * tests. So enforce the restriction here.
	 */

	if (!is_ver_8bit(&uc_fw->file_selected.ver)) {
		gt_warn(gt, "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
			intel_uc_fw_type_repr(uc_fw->type),
			uc_fw->file_selected.ver.major,
			uc_fw->file_selected.ver.minor,
			uc_fw->file_selected.ver.patch);
		return -EINVAL;
	}

	if (!is_ver_8bit(&guc->submission_version)) {
		gt_warn(gt, "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
			intel_uc_fw_type_repr(uc_fw->type),
			guc->submission_version.major,
			guc->submission_version.minor,
			guc->submission_version.patch);
		return -EINVAL;
	}

	/* 0 in normal operation; fault injection may force a failure here */
	return i915_inject_probe_error(gt->i915, -EINVAL);
}
749
check_fw_header(struct intel_gt * gt,const struct firmware * fw,struct intel_uc_fw * uc_fw)750 static int check_fw_header(struct intel_gt *gt,
751 const struct firmware *fw,
752 struct intel_uc_fw *uc_fw)
753 {
754 int err = 0;
755
756 if (uc_fw->has_gsc_headers)
757 err = check_gsc_manifest(gt, fw, uc_fw);
758 else
759 err = check_ccs_header(gt, fw, uc_fw);
760 if (err)
761 return err;
762
763 return 0;
764 }
765
/*
 * Request the currently selected blob from the firmware loader. Firmware
 * that must be mapped in the GGTT is additionally bounded by the per-FW
 * GGTT reservation; an oversized blob is released and -ENOENT returned
 * so the caller's fallback search can move on to an older file.
 */
static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct device *dev = gt->i915->drm.dev;
	int err;

	/* _nowarn: a missing file is an expected outcome during fallback */
	err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev);

	if (err)
		return err;

	if (uc_fw->needs_ggtt_mapping && (*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
		gt_err(gt, "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
		       intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
		       (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);

		/* try to find another blob to load */
		release_firmware(*fw);
		*fw = NULL;
		return -ENOENT;
	}

	return 0;
}
790
/*
 * On MTL a change in the HuC authentication flow ties HuC and GuC
 * releases together: HuC 8.5.1+ needs GuC 70.7.0+ and older HuC needs
 * older GuC. Returns -ENOEXEC if the two fetched binaries fall on
 * opposite sides of that cutoff.
 */
static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
					   struct intel_uc_fw_file *huc_selected)
{
	struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
	struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
	struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
	bool new_huc, new_guc;

	/* we can only do this check after having fetched both GuC and HuC */
	GEM_BUG_ON(!huc_selected->path || !guc_selected->path);

	/*
	 * Due to changes in the authentication flow for MTL, HuC 8.5.1 or newer
	 * requires GuC 70.7.0 or newer. Older HuC binaries will instead require
	 * GuC < 70.7.0.
	 */
	new_huc = huc_ver->major > 8 ||
		  (huc_ver->major == 8 && huc_ver->minor > 5) ||
		  (huc_ver->major == 8 && huc_ver->minor == 5 && huc_ver->patch >= 1);

	new_guc = guc_ver->major > 70 ||
		  (guc_ver->major == 70 && guc_ver->minor >= 7);

	if (new_huc != new_guc) {
		UNEXPECTED(gt, "HuC %u.%u.%u is incompatible with GuC %u.%u.%u\n",
			   huc_ver->major, huc_ver->minor, huc_ver->patch,
			   guc_ver->major, guc_ver->minor, guc_ver->patch);
		gt_info(gt, "MTL GuC 70.7.0+ and HuC 8.5.1+ don't work with older releases\n");
		return -ENOEXEC;
	}

	return 0;
}
824
/*
 * Compare the version found in the fetched file against the version the
 * driver wanted. A major-version mismatch is fatal (-ENOEXEC) unless the
 * user explicitly overrode the path; an older minor/patch only sets
 * *old_ver so the caller can warn about outdated firmware. MTL HuC gets
 * an extra GuC/HuC cross-compatibility check.
 */
int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct intel_uc_fw_file *wanted = &uc_fw->file_wanted;
	struct intel_uc_fw_file *selected = &uc_fw->file_selected;
	int ret;

	/*
	 * MTL has some compatibility issues with early GuC/HuC binaries
	 * not working with newer ones. This is specific to MTL and we
	 * don't expect it to extend to other platforms.
	 */
	if (IS_METEORLAKE(gt->i915) && uc_fw->type == INTEL_UC_FW_TYPE_HUC) {
		ret = check_mtl_huc_guc_compatibility(gt, selected);
		if (ret)
			return ret;
	}

	/* Version 0 on either side means "nothing to compare against" */
	if (!wanted->ver.major || !selected->ver.major)
		return 0;

	/* Check the file's major version was as it claimed */
	if (selected->ver.major != wanted->ver.major) {
		UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
			   intel_uc_fw_type_repr(uc_fw->type), selected->path,
			   selected->ver.major, selected->ver.minor,
			   wanted->ver.major, wanted->ver.minor);
		if (!intel_uc_fw_is_overridden(uc_fw))
			return -ENOEXEC;
	} else if (old_ver) {
		if (selected->ver.minor < wanted->ver.minor)
			*old_ver = true;
		else if ((selected->ver.minor == wanted->ver.minor) &&
			 (selected->ver.patch < wanted->ver.patch))
			*old_ver = true;
	}

	return 0;
}
864
865 /**
866 * intel_uc_fw_fetch - fetch uC firmware
867 * @uc_fw: uC firmware
868 *
869 * Fetch uC firmware into GEM obj.
870 *
871 * Return: 0 on success, a negative errno code on failure.
872 */
intel_uc_fw_fetch(struct intel_uc_fw * uc_fw)873 int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
874 {
875 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
876 struct drm_i915_private *i915 = gt->i915;
877 struct intel_uc_fw_file file_ideal;
878 struct drm_i915_gem_object *obj;
879 const struct firmware *fw = NULL;
880 bool old_ver = false;
881 int err;
882
883 GEM_BUG_ON(!gt->wopcm.size);
884 GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
885
886 err = i915_inject_probe_error(i915, -ENXIO);
887 if (err)
888 goto fail;
889
890 __force_fw_fetch_failures(uc_fw, -EINVAL);
891 __force_fw_fetch_failures(uc_fw, -ESTALE);
892
893 err = try_firmware_load(uc_fw, &fw);
894 memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
895
896 /* Any error is terminal if overriding. Don't bother searching for older versions */
897 if (err && intel_uc_fw_is_overridden(uc_fw))
898 goto fail;
899
900 while (err == -ENOENT) {
901 old_ver = true;
902
903 __uc_fw_auto_select(i915, uc_fw);
904 if (!uc_fw->file_selected.path) {
905 /*
906 * No more options! But set the path back to something
907 * valid just in case it gets dereferenced.
908 */
909 uc_fw->file_selected.path = file_ideal.path;
910
911 /* Also, preserve the version that was really wanted */
912 memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
913 break;
914 }
915
916 err = try_firmware_load(uc_fw, &fw);
917 }
918
919 if (err)
920 goto fail;
921
922 err = check_fw_header(gt, fw, uc_fw);
923 if (err)
924 goto fail;
925
926 if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
927 err = guc_check_version_range(uc_fw);
928 if (err)
929 goto fail;
930 }
931
932 err = intel_uc_check_file_version(uc_fw, &old_ver);
933 if (err)
934 goto fail;
935
936 if (old_ver && uc_fw->file_selected.ver.major) {
937 /* Preserve the version that was really wanted */
938 memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
939
940 UNEXPECTED(gt, "%s firmware %s (%d.%d.%d) is recommended, but only %s (%d.%d.%d) was found\n",
941 intel_uc_fw_type_repr(uc_fw->type),
942 uc_fw->file_wanted.path,
943 uc_fw->file_wanted.ver.major,
944 uc_fw->file_wanted.ver.minor,
945 uc_fw->file_wanted.ver.patch,
946 uc_fw->file_selected.path,
947 uc_fw->file_selected.ver.major,
948 uc_fw->file_selected.ver.minor,
949 uc_fw->file_selected.ver.patch);
950 #ifdef __linux__
951 gt_info(gt, "Consider updating your linux-firmware pkg or downloading from %s\n",
952 INTEL_UC_FIRMWARE_URL);
953 #endif
954 }
955
956 if (HAS_LMEM(i915)) {
957 obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
958 if (!IS_ERR(obj))
959 obj->flags |= I915_BO_ALLOC_PM_EARLY;
960 } else {
961 obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
962 }
963
964 if (IS_ERR(obj)) {
965 err = PTR_ERR(obj);
966 goto fail;
967 }
968
969 uc_fw->obj = obj;
970 uc_fw->size = fw->size;
971 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
972
973 release_firmware(fw);
974 return 0;
975
976 fail:
977 intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
978 INTEL_UC_FIRMWARE_MISSING :
979 INTEL_UC_FIRMWARE_ERROR);
980
981 gt_probe_error(gt, "%s firmware %s: fetch failed %pe\n",
982 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
983 #ifdef __linux__
984 gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
985 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
986 #endif
987
988 release_firmware(fw); /* OK even if fw is NULL */
989 return err;
990 }
991
uc_fw_ggtt_offset(struct intel_uc_fw * uc_fw)992 static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
993 {
994 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
995 struct i915_ggtt *ggtt = gt->ggtt;
996 struct drm_mm_node *node = &ggtt->uc_fw;
997 u32 offset = uc_fw->type * INTEL_UC_RSVD_GGTT_PER_FW;
998
999 /*
1000 * The media GT shares the GGTT with the root GT, which means that
1001 * we need to use different offsets for the binaries on the media GT.
1002 * To keep the math simple, we use 8MB for the root tile and 8MB for
1003 * the media one. This will need to be updated if we ever have more
1004 * than 1 media GT.
1005 */
1006 BUILD_BUG_ON(INTEL_UC_FW_NUM_TYPES * INTEL_UC_RSVD_GGTT_PER_FW > SZ_8M);
1007 GEM_BUG_ON(gt->type == GT_MEDIA && gt->info.id > 1);
1008 if (gt->type == GT_MEDIA)
1009 offset += SZ_8M;
1010
1011 GEM_BUG_ON(!drm_mm_node_allocated(node));
1012 GEM_BUG_ON(upper_32_bits(node->start));
1013 GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
1014 GEM_BUG_ON(offset + uc_fw->obj->base.size > node->size);
1015 GEM_BUG_ON(uc_fw->obj->base.size > INTEL_UC_RSVD_GGTT_PER_FW);
1016
1017 return lower_32_bits(node->start + offset);
1018 }
1019
uc_fw_bind_ggtt(struct intel_uc_fw * uc_fw)1020 static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
1021 {
1022 struct drm_i915_gem_object *obj = uc_fw->obj;
1023 struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
1024 struct i915_vma_resource *vma_res = &uc_fw->vma_res;
1025 u32 pte_flags = 0;
1026
1027 if (!uc_fw->needs_ggtt_mapping)
1028 return;
1029
1030 vma_res->start = uc_fw_ggtt_offset(uc_fw);
1031 vma_res->node_size = obj->base.size;
1032 vma_res->bi.pages = obj->mm.pages;
1033
1034 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1035
1036 /* uc_fw->obj cache domains were not controlled across suspend */
1037 if (i915_gem_object_has_struct_page(obj))
1038 drm_clflush_sg(vma_res->bi.pages);
1039
1040 if (i915_gem_object_is_lmem(obj))
1041 pte_flags |= PTE_LM;
1042
1043 if (ggtt->vm.raw_insert_entries)
1044 ggtt->vm.raw_insert_entries(&ggtt->vm, vma_res,
1045 i915_gem_get_pat_index(ggtt->vm.i915,
1046 I915_CACHE_NONE),
1047 pte_flags);
1048 else
1049 ggtt->vm.insert_entries(&ggtt->vm, vma_res,
1050 i915_gem_get_pat_index(ggtt->vm.i915,
1051 I915_CACHE_NONE),
1052 pte_flags);
1053 }
1054
uc_fw_unbind_ggtt(struct intel_uc_fw * uc_fw)1055 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
1056 {
1057 struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
1058 struct i915_vma_resource *vma_res = &uc_fw->vma_res;
1059
1060 if (!vma_res->node_size)
1061 return;
1062
1063 ggtt->vm.clear_range(&ggtt->vm, vma_res->start, vma_res->node_size);
1064 }
1065
uc_fw_xfer(struct intel_uc_fw * uc_fw,u32 dst_offset,u32 dma_flags)1066 static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
1067 {
1068 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
1069 struct intel_uncore *uncore = gt->uncore;
1070 u64 offset;
1071 int ret;
1072
1073 ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
1074 if (ret)
1075 return ret;
1076
1077 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1078
1079 /* Set the source address for the uCode */
1080 offset = uc_fw->vma_res.start + uc_fw->dma_start_offset;
1081 GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
1082 intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
1083 intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
1084
1085 /* Set the DMA destination */
1086 intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
1087 intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
1088
1089 /*
1090 * Set the transfer size. The header plus uCode will be copied to WOPCM
1091 * via DMA, excluding any other components
1092 */
1093 intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
1094 sizeof(struct uc_css_header) + uc_fw->ucode_size);
1095
1096 /* Start the DMA */
1097 intel_uncore_write_fw(uncore, DMA_CTRL,
1098 _MASKED_BIT_ENABLE(dma_flags | START_DMA));
1099
1100 /* Wait for DMA to finish */
1101 ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
1102 if (ret)
1103 gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
1104 intel_uc_fw_type_repr(uc_fw->type),
1105 intel_uncore_read_fw(uncore, DMA_CTRL));
1106
1107 /* Disable the bits once DMA is over */
1108 intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
1109
1110 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
1111
1112 return ret;
1113 }
1114
intel_uc_fw_mark_load_failed(struct intel_uc_fw * uc_fw,int err)1115 int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err)
1116 {
1117 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
1118
1119 GEM_BUG_ON(!intel_uc_fw_is_loadable(uc_fw));
1120
1121 gt_probe_error(gt, "Failed to load %s firmware %s %pe\n",
1122 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
1123 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
1124
1125 return err;
1126 }
1127
1128 /**
1129 * intel_uc_fw_upload - load uC firmware using custom loader
1130 * @uc_fw: uC firmware
1131 * @dst_offset: destination offset
1132 * @dma_flags: flags for flags for dma ctrl
1133 *
1134 * Loads uC firmware and updates internal flags.
1135 *
1136 * Return: 0 on success, non-zero on failure.
1137 */
intel_uc_fw_upload(struct intel_uc_fw * uc_fw,u32 dst_offset,u32 dma_flags)1138 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
1139 {
1140 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
1141 int err;
1142
1143 /* make sure the status was cleared the last time we reset the uc */
1144 GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
1145
1146 err = i915_inject_probe_error(gt->i915, -ENOEXEC);
1147 if (err)
1148 return err;
1149
1150 if (!intel_uc_fw_is_loadable(uc_fw))
1151 return -ENOEXEC;
1152
1153 /* Call custom loader */
1154 err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
1155 if (err)
1156 goto fail;
1157
1158 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
1159 return 0;
1160
1161 fail:
1162 return intel_uc_fw_mark_load_failed(uc_fw, err);
1163 }
1164
uc_fw_need_rsa_in_memory(struct intel_uc_fw * uc_fw)1165 static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
1166 {
1167 /*
1168 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
1169 * while it reads it from the 64 RSA registers if it is smaller.
1170 * The HuC RSA is always read from memory.
1171 */
1172 return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
1173 }
1174
uc_fw_rsa_data_create(struct intel_uc_fw * uc_fw)1175 static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
1176 {
1177 struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
1178 struct i915_vma *vma;
1179 size_t copied;
1180 void *vaddr;
1181 int err;
1182
1183 err = i915_inject_probe_error(gt->i915, -ENXIO);
1184 if (err)
1185 return err;
1186
1187 if (!uc_fw_need_rsa_in_memory(uc_fw))
1188 return 0;
1189
1190 /*
1191 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
1192 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
1193 * authentication from memory, as the RSA offset now falls within the
1194 * GuC inaccessible range. We resort to perma-pinning an additional vma
1195 * within the accessible range that only contains the RSA signature.
1196 * The GuC HW can use this extra pinning to perform the authentication
1197 * since its GGTT offset will be GuC accessible.
1198 */
1199 GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
1200 vma = intel_guc_allocate_vma(>->uc.guc, PAGE_SIZE);
1201 if (IS_ERR(vma))
1202 return PTR_ERR(vma);
1203
1204 vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
1205 intel_gt_coherent_map_type(gt, vma->obj, true));
1206 if (IS_ERR(vaddr)) {
1207 i915_vma_unpin_and_release(&vma, 0);
1208 err = PTR_ERR(vaddr);
1209 goto unpin_out;
1210 }
1211
1212 copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
1213 i915_gem_object_unpin_map(vma->obj);
1214
1215 if (copied < uc_fw->rsa_size) {
1216 err = -ENOMEM;
1217 goto unpin_out;
1218 }
1219
1220 uc_fw->rsa_data = vma;
1221
1222 return 0;
1223
1224 unpin_out:
1225 i915_vma_unpin_and_release(&vma, 0);
1226 return err;
1227 }
1228
uc_fw_rsa_data_destroy(struct intel_uc_fw * uc_fw)1229 static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
1230 {
1231 i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
1232 }
1233
intel_uc_fw_init(struct intel_uc_fw * uc_fw)1234 int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
1235 {
1236 int err;
1237
1238 /* this should happen before the load! */
1239 GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
1240
1241 if (!intel_uc_fw_is_available(uc_fw))
1242 return -ENOEXEC;
1243
1244 err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
1245 if (err) {
1246 gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw pin-pages failed %pe\n",
1247 intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
1248 goto out;
1249 }
1250
1251 err = uc_fw_rsa_data_create(uc_fw);
1252 if (err) {
1253 gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw rsa data creation failed %pe\n",
1254 intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
1255 goto out_unpin;
1256 }
1257
1258 uc_fw_bind_ggtt(uc_fw);
1259
1260 return 0;
1261
1262 out_unpin:
1263 i915_gem_object_unpin_pages(uc_fw->obj);
1264 out:
1265 return err;
1266 }
1267
intel_uc_fw_fini(struct intel_uc_fw * uc_fw)1268 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
1269 {
1270 uc_fw_unbind_ggtt(uc_fw);
1271 uc_fw_rsa_data_destroy(uc_fw);
1272
1273 if (i915_gem_object_has_pinned_pages(uc_fw->obj))
1274 i915_gem_object_unpin_pages(uc_fw->obj);
1275
1276 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
1277 }
1278
intel_uc_fw_resume_mapping(struct intel_uc_fw * uc_fw)1279 void intel_uc_fw_resume_mapping(struct intel_uc_fw *uc_fw)
1280 {
1281 if (!intel_uc_fw_is_available(uc_fw))
1282 return;
1283
1284 if (!i915_gem_object_has_pinned_pages(uc_fw->obj))
1285 return;
1286
1287 uc_fw_bind_ggtt(uc_fw);
1288 }
1289
1290 /**
1291 * intel_uc_fw_cleanup_fetch - cleanup uC firmware
1292 * @uc_fw: uC firmware
1293 *
1294 * Cleans up uC firmware by releasing the firmware GEM obj.
1295 */
intel_uc_fw_cleanup_fetch(struct intel_uc_fw * uc_fw)1296 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
1297 {
1298 if (!intel_uc_fw_is_available(uc_fw))
1299 return;
1300
1301 i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
1302
1303 intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
1304 }
1305
1306 /**
1307 * intel_uc_fw_copy_rsa - copy fw RSA to buffer
1308 *
1309 * @uc_fw: uC firmware
1310 * @dst: dst buffer
1311 * @max_len: max number of bytes to copy
1312 *
1313 * Return: number of copied bytes.
1314 */
intel_uc_fw_copy_rsa(struct intel_uc_fw * uc_fw,void * dst,u32 max_len)1315 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
1316 {
1317 struct intel_memory_region *mr = uc_fw->obj->mm.region;
1318 u32 size = min_t(u32, uc_fw->rsa_size, max_len);
1319 u32 offset = uc_fw->dma_start_offset + sizeof(struct uc_css_header) + uc_fw->ucode_size;
1320 struct sgt_iter iter;
1321 size_t count = 0;
1322 int idx;
1323
1324 /* Called during reset handling, must be atomic [no fs_reclaim] */
1325 GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
1326
1327 idx = offset >> PAGE_SHIFT;
1328 offset = offset_in_page(offset);
1329 if (i915_gem_object_has_struct_page(uc_fw->obj)) {
1330 struct vm_page *page;
1331
1332 for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
1333 u32 len = min_t(u32, size, PAGE_SIZE - offset);
1334 void *vaddr;
1335
1336 if (idx > 0) {
1337 idx--;
1338 continue;
1339 }
1340
1341 vaddr = kmap_atomic(page);
1342 memcpy(dst, vaddr + offset, len);
1343 kunmap_atomic(vaddr);
1344
1345 offset = 0;
1346 dst += len;
1347 size -= len;
1348 count += len;
1349 if (!size)
1350 break;
1351 }
1352 } else {
1353 dma_addr_t addr;
1354
1355 for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
1356 u32 len = min_t(u32, size, PAGE_SIZE - offset);
1357 void __iomem *vaddr;
1358
1359 if (idx > 0) {
1360 idx--;
1361 continue;
1362 }
1363
1364 vaddr = io_mapping_map_atomic_wc(&mr->iomap,
1365 addr - mr->region.start);
1366 memcpy_fromio(dst, vaddr + offset, len);
1367 io_mapping_unmap_atomic(vaddr);
1368
1369 offset = 0;
1370 dst += len;
1371 size -= len;
1372 count += len;
1373 if (!size)
1374 break;
1375 }
1376 }
1377
1378 return count;
1379 }
1380
1381 /**
1382 * intel_uc_fw_dump - dump information about uC firmware
1383 * @uc_fw: uC firmware
1384 * @p: the &drm_printer
1385 *
1386 * Pretty printer for uC firmware.
1387 */
intel_uc_fw_dump(const struct intel_uc_fw * uc_fw,struct drm_printer * p)1388 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
1389 {
1390 bool got_wanted;
1391
1392 drm_printf(p, "%s firmware: %s\n",
1393 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path);
1394 if (uc_fw->file_selected.path != uc_fw->file_wanted.path)
1395 drm_printf(p, "%s firmware wanted: %s\n",
1396 intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_wanted.path);
1397 drm_printf(p, "\tstatus: %s\n",
1398 intel_uc_fw_status_repr(uc_fw->status));
1399
1400 if (uc_fw->file_selected.ver.major < uc_fw->file_wanted.ver.major)
1401 got_wanted = false;
1402 else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
1403 (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor))
1404 got_wanted = false;
1405 else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
1406 (uc_fw->file_selected.ver.minor == uc_fw->file_wanted.ver.minor) &&
1407 (uc_fw->file_selected.ver.patch < uc_fw->file_wanted.ver.patch))
1408 got_wanted = false;
1409 else
1410 got_wanted = true;
1411
1412 if (!got_wanted)
1413 drm_printf(p, "\tversion: wanted %u.%u.%u, found %u.%u.%u\n",
1414 uc_fw->file_wanted.ver.major,
1415 uc_fw->file_wanted.ver.minor,
1416 uc_fw->file_wanted.ver.patch,
1417 uc_fw->file_selected.ver.major,
1418 uc_fw->file_selected.ver.minor,
1419 uc_fw->file_selected.ver.patch);
1420 else
1421 drm_printf(p, "\tversion: found %u.%u.%u\n",
1422 uc_fw->file_selected.ver.major,
1423 uc_fw->file_selected.ver.minor,
1424 uc_fw->file_selected.ver.patch);
1425 drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
1426 drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
1427 }
1428