// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_ring.h"
#include "intel_gsc_binary_headers.h"
#include "intel_gsc_fw.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
#include "i915_reg.h"

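/*
 * The GSC firmware state is reported in the HECI1 FWSTS1 register: the
 * CURRENT_STATE field tells us whether the GSC is sitting in reset, which is
 * the state the load path expects before submitting a new firmware load.
 */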
static bool gsc_is_in_reset(struct intel_uncore *uncore)
{
	u32 fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));

	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fw_status) ==
	       HECI1_FWSTS1_CURRENT_STATE_RESET;
}

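/*
 * Reading the FWSTS register requires the HW to be awake; callers that don't
 * already hold a runtime PM reference must ask us to grab one here.
 */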
static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore, bool needs_wakeref)
{
	intel_wakeref_t wakeref;
	u32 fw_status = 0;

	if (needs_wakeref)
		wakeref = intel_runtime_pm_get(uncore->rpm);

	fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));

	if (needs_wakeref)
		intel_runtime_pm_put(uncore->rpm, wakeref);
	return fw_status;
}

bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref)
{
	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE,
			     gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore,
						  needs_wakeref)) ==
	       HECI1_FWSTS1_PROXY_STATE_NORMAL;
}

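/*
 * Proxy status as seen by users of the GSC FW:
 * -ENODEV if proxy support is not built in or the FW is not loadable,
 * -ENOLINK if the FW load has failed, -EAGAIN if the FW is loaded but the
 * proxy init flow has not completed yet, 0 when the FW is fully operational.
 */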
int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc)
{
	if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY))
		return -ENODEV;
	if (!intel_uc_fw_is_loadable(&gsc->fw))
		return -ENODEV;
	if (__intel_uc_fw_status(&gsc->fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return -ENOLINK;
	if (!intel_gsc_uc_fw_proxy_init_done(gsc, true))
		return -EAGAIN;

	return 0;
}

bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
{
	return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore, false) &
	       HECI1_FWSTS1_INIT_COMPLETE;
}

static inline u32 cpd_entry_offset(const struct intel_gsc_cpd_entry *entry)
{
	return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK;
}

int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size)
{
	struct intel_gsc_uc *gsc = container_of(gsc_fw, struct intel_gsc_uc, fw);
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	const struct intel_gsc_layout_pointers *layout = data;
	const struct intel_gsc_bpdt_header *bpdt_header = NULL;
	const struct intel_gsc_bpdt_entry *bpdt_entry = NULL;
	const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
	const struct intel_gsc_cpd_entry *cpd_entry = NULL;
	const struct intel_gsc_manifest_header *manifest;
	size_t min_size = sizeof(*layout);
	int i;

	if (size < min_size) {
		gt_err(gt, "GSC FW too small! %zu < %zu\n", size, min_size);
		return -ENODATA;
	}

	/*
	 * The GSC binary starts with the pointer layout, which contains the
	 * locations of the various partitions of the binary. The one we're
	 * interested in to get the version is the boot1 partition, where we can
	 * find a BPDT header followed by entries, one of which points to the
	 * RBE sub-section of the partition. From here, we can parse the CPD
	 * header and the following entries to find the manifest location
	 * (entry identified by the "RBEP.man" name), from which we can finally
	 * extract the version.
	 *
	 * --------------------------------------------------
	 * [  intel_gsc_layout_pointers                     ]
	 * [      ...                                       ]
	 * [      boot1.offset  >---------------------------]------o
	 * [      ...                                       ]      |
	 * --------------------------------------------------      |
	 *                                                          |
	 * --------------------------------------------------      |
	 * [  intel_gsc_bpdt_header                         ]<-----o
	 * --------------------------------------------------
	 * [  intel_gsc_bpdt_entry[]                        ]
	 * [      entry1                                    ]
	 * [      ...                                       ]
	 * [      entryX                                    ]
	 * [          type == GSC_RBE                       ]
	 * [          offset  >-----------------------------]------o
	 * [      ...                                       ]      |
	 * --------------------------------------------------      |
	 *                                                          |
	 * --------------------------------------------------      |
	 * [  intel_gsc_cpd_header_v2                       ]<-----o
	 * --------------------------------------------------
	 * [  intel_gsc_cpd_entry[]                         ]
	 * [      entry1                                    ]
	 * [      ...                                       ]
	 * [      entryX                                    ]
	 * [          "RBEP.man"                            ]
	 * [          ...                                   ]
	 * [          offset  >-----------------------------]------o
	 * [      ...                                       ]      |
	 * --------------------------------------------------      |
	 *                                                          |
	 * --------------------------------------------------      |
	 * [  intel_gsc_manifest_header                     ]<-----o
	 * [      ...                                       ]
	 * [      intel_gsc_version fw_version              ]
	 * [      ...                                       ]
	 * --------------------------------------------------
	 */

	min_size = layout->boot1.offset + layout->boot1.size;
	if (size < min_size) {
		gt_err(gt, "GSC FW too small for boot section! %zu < %zu\n",
		       size, min_size);
		return -ENODATA;
	}

	min_size = sizeof(*bpdt_header);
	if (layout->boot1.size < min_size) {
		gt_err(gt, "GSC FW boot section too small for BPDT header: %u < %zu\n",
		       layout->boot1.size, min_size);
		return -ENODATA;
	}

	bpdt_header = data + layout->boot1.offset;
	if (bpdt_header->signature != INTEL_GSC_BPDT_HEADER_SIGNATURE) {
		gt_err(gt, "invalid signature for BPDT header: 0x%08x!\n",
		       bpdt_header->signature);
		return -EINVAL;
	}

	min_size += sizeof(*bpdt_entry) * bpdt_header->descriptor_count;
	if (layout->boot1.size < min_size) {
		gt_err(gt, "GSC FW boot section too small for BPDT entries: %u < %zu\n",
		       layout->boot1.size, min_size);
		return -ENODATA;
	}

	bpdt_entry = (void *)bpdt_header + sizeof(*bpdt_header);
	for (i = 0; i < bpdt_header->descriptor_count; i++, bpdt_entry++) {
		if ((bpdt_entry->type & INTEL_GSC_BPDT_ENTRY_TYPE_MASK) !=
		    INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE)
			continue;

		cpd_header = (void *)bpdt_header + bpdt_entry->sub_partition_offset;
		min_size = bpdt_entry->sub_partition_offset + sizeof(*cpd_header);
		break;
	}

	if (!cpd_header) {
		gt_err(gt, "couldn't find CPD header in GSC binary!\n");
		return -ENODATA;
	}

	if (layout->boot1.size < min_size) {
		gt_err(gt, "GSC FW boot section too small for CPD header: %u < %zu\n",
		       layout->boot1.size, min_size);
		return -ENODATA;
	}

	if (cpd_header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) {
		gt_err(gt, "invalid marker for CPD header in GSC bin: 0x%08x!\n",
		       cpd_header->header_marker);
		return -EINVAL;
	}

	min_size += sizeof(*cpd_entry) * cpd_header->num_of_entries;
	if (layout->boot1.size < min_size) {
		gt_err(gt, "GSC FW boot section too small for CPD entries: %u < %zu\n",
		       layout->boot1.size, min_size);
		return -ENODATA;
	}

	cpd_entry = (void *)cpd_header + cpd_header->header_length;
	for (i = 0; i < cpd_header->num_of_entries; i++, cpd_entry++) {
		if (strcmp(cpd_entry->name, "RBEP.man") == 0) {
			manifest = (void *)cpd_header + cpd_entry_offset(cpd_entry);
			intel_uc_fw_version_from_gsc_manifest(&gsc->release,
							      manifest);
			gsc->security_version = manifest->security_version;
			break;
		}
	}

	if (IS_ARROWLAKE(gt->i915)) {
		bool too_old = false;

		/*
		 * ARL requires a newer firmware than MTL did (102.0.10.1878),
		 * but the firmware is actually common, so we need to do an
		 * explicit version check here rather than using a separate
		 * table entry. And if the older MTL-only version is found,
		 * then just don't use GSC rather than aborting the driver load.
		 */
		if (gsc->release.major < 102) {
			too_old = true;
		} else if (gsc->release.major == 102) {
			if (gsc->release.minor == 0) {
				if (gsc->release.patch < 10) {
					too_old = true;
				} else if (gsc->release.patch == 10) {
					if (gsc->release.build < 1878)
						too_old = true;
				}
			}
		}

		if (too_old) {
			gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least 102.0.10.1878",
				gsc->release.major, gsc->release.minor,
				gsc->release.patch, gsc->release.build);
			return -EINVAL;
		}
	}

	return 0;
}

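/*
 * The FW load is triggered via a GSC_FW_LOAD command submitted to the GSC
 * engine: the payload is the GGTT address of the memory handed over to the
 * GSC and its size in 4K pages, with the limit marked as valid.
 */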
static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
{
	u32 offset = i915_ggtt_offset(gsc->local);
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GSC_FW_LOAD;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;

	intel_ring_advance(rq, cs);

	return 0;
}

static int gsc_fw_load(struct intel_gsc_uc *gsc)
{
	struct intel_context *ce = gsc->ce;
	struct i915_request *rq;
	int err;

	if (!ce)
		return -ENODEV;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (ce->engine->emit_init_breadcrumb) {
		err = ce->engine->emit_init_breadcrumb(rq);
		if (err)
			goto out_rq;
	}

	err = emit_gsc_fw_load(rq, gsc);
	if (err)
		goto out_rq;

	err = ce->engine->emit_flush(rq, 0);

out_rq:
	i915_request_get(rq);

	if (unlikely(err))
		i915_request_set_error_once(rq, err);

	i915_request_add(rq);

	if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
		err = -ETIME;

	i915_request_put(rq);

	if (err)
		gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
		       ERR_PTR(err));

	return err;
}

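/*
 * Copy the firmware blob into the local memory allocation that is handed over
 * to the GSC and zero out the remainder, so no stale data is passed along with
 * the image.
 */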
static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	void *src;

	if (!gsc->local)
		return -ENODEV;

	if (gsc->local->size < gsc->fw.size)
		return -ENOSPC;

	src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
					       intel_gt_coherent_map_type(gt, gsc->fw.obj, true));
	if (IS_ERR(src))
		return PTR_ERR(src);

	memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
	memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);

	intel_guc_write_barrier(gt_to_guc(gt));

	i915_gem_object_unpin_map(gsc->fw.obj);

	return 0;
}

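/* Wait (up to 500ms) for the GSC to report INIT_COMPLETE in FWSTS1. */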
static int gsc_fw_wait(struct intel_gt *gt)
{
	return intel_wait_for_register(gt->uncore,
				       HECI_FWSTS(MTL_GSC_HECI1_BASE, 1),
				       HECI1_FWSTS1_INIT_COMPLETE,
				       HECI1_FWSTS1_INIT_COMPLETE,
				       500);
}

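/*
 * MKHI message header used over HECI; the group/command pair below selects
 * the host compatibility version query in the graphics services group.
 */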
struct intel_gsc_mkhi_header {
	u8 group_id;
#define MKHI_GROUP_ID_GFX_SRV 0x30

	u8 command;
#define MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION (0x42)

	u8 reserved;
	u8 result;
} __packed;

struct mtl_gsc_ver_msg_in {
	struct intel_gsc_mtl_header header;
	struct intel_gsc_mkhi_header mkhi;
} __packed;

struct mtl_gsc_ver_msg_out {
	struct intel_gsc_mtl_header header;
	struct intel_gsc_mkhi_header mkhi;
	u16 proj_major;
	u16 compat_major;
	u16 compat_minor;
	u16 reserved[5];
} __packed;

#define GSC_VER_PKT_SZ SZ_4K

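/*
 * The compatibility version is queried by sending a MKHI message through the
 * HECI packet submission interface: the input and output buffers are carved
 * out of a single two-page GGTT allocation, one page each.
 */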
static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct mtl_gsc_ver_msg_in *msg_in;
	struct mtl_gsc_ver_msg_out *msg_out;
	struct i915_vma *vma;
	u64 offset;
	void *vaddr;
	int err;

	err = intel_guc_allocate_and_map_vma(gt_to_guc(gt), GSC_VER_PKT_SZ * 2,
					     &vma, &vaddr);
	if (err) {
		gt_err(gt, "failed to allocate vma for GSC version query\n");
		return err;
	}

	offset = i915_ggtt_offset(vma);
	msg_in = vaddr;
	msg_out = vaddr + GSC_VER_PKT_SZ;

	intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header,
					      HECI_MEADDRESS_MKHI,
					      sizeof(*msg_in), 0);
	msg_in->mkhi.group_id = MKHI_GROUP_ID_GFX_SRV;
	msg_in->mkhi.command = MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION;

	err = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc,
						  offset,
						  sizeof(*msg_in),
						  offset + GSC_VER_PKT_SZ,
						  GSC_VER_PKT_SZ);
	if (err) {
		gt_err(gt,
		       "failed to submit GSC request for compatibility version: %d\n",
		       err);
		goto out_vma;
	}

	if (msg_out->header.message_size != sizeof(*msg_out)) {
		gt_err(gt, "invalid GSC reply length %u [expected %zu], s=0x%x, f=0x%x, r=0x%x\n",
		       msg_out->header.message_size, sizeof(*msg_out),
		       msg_out->header.status, msg_out->header.flags, msg_out->mkhi.result);
		err = -EPROTO;
		goto out_vma;
	}

	gsc->fw.file_selected.ver.major = msg_out->compat_major;
	gsc->fw.file_selected.ver.minor = msg_out->compat_minor;

out_vma:
	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
	return err;
}

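/*
 * Load the GSC firmware: copy the blob into its dedicated memory, submit the
 * load command, wait for init completion and then query and validate the
 * compatibility version before marking the FW as transferred.
 */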
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
{
	struct intel_gt *gt = gsc_uc_to_gt(gsc);
	struct intel_uc_fw *gsc_fw = &gsc->fw;
	int err;

	/* check current fw status */
	if (intel_gsc_uc_fw_init_done(gsc)) {
		if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
			intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
		return -EEXIST;
	}

	if (!intel_uc_fw_is_loadable(gsc_fw))
		return -ENOEXEC;

	/* FW blob is ok, so clean the status */
	intel_uc_fw_sanitize(&gsc->fw);

	if (!gsc_is_in_reset(gt->uncore))
		return -EIO;

	err = gsc_fw_load_prepare(gsc);
	if (err)
		goto fail;

	/*
	 * GSC is only killed by an FLR, so we need to trigger one on unload to
	 * make sure we stop it. This is because we assign a chunk of memory to
	 * the GSC as part of the FW load, so we need to make sure it stops
	 * using it when we release it to the system on driver unload. Note that
	 * this is not a problem of the unload per se, because the GSC will not
	 * touch that memory unless there are requests for it coming from the
	 * driver; therefore, no accesses will happen while i915 is not loaded,
	 * but if we re-load the driver then the GSC might wake up and try to
	 * access that old memory location again.
	 * Given that an FLR is a very disruptive action (see the FLR function
	 * for details), we want to do it as the last action before releasing
	 * the access to the MMIO bar, which means we need to do it as part of
	 * the primary uncore cleanup.
	 * An alternative approach to the FLR would be to use a memory location
	 * that survives driver unload, like e.g. stolen memory, and keep the
	 * GSC loaded across reloads. However, this requires us to make sure we
	 * preserve that memory location on unload and then determine and
	 * reserve its offset on each subsequent load, which is not trivial, so
	 * it is easier to just kill everything and start fresh.
	 */
	intel_uncore_set_flr_on_fini(&gt->i915->uncore);

	err = gsc_fw_load(gsc);
	if (err)
		goto fail;

	err = gsc_fw_wait(gt);
	if (err)
		goto fail;

	err = gsc_fw_query_compatibility_version(gsc);
	if (err)
		goto fail;

	/* we only support compatibility version 1.0 at the moment */
	err = intel_uc_check_file_version(gsc_fw, NULL);
	if (err)
		goto fail;

	/* FW is not fully operational until we enable SW proxy */
	intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);

	gt_info(gt, "Loaded GSC firmware %s (cv%u.%u, r%u.%u.%u.%u, svn %u)\n",
		gsc_fw->file_selected.path,
		gsc_fw->file_selected.ver.major, gsc_fw->file_selected.ver.minor,
		gsc->release.major, gsc->release.minor,
		gsc->release.patch, gsc->release.build,
		gsc->security_version);

	return 0;

fail:
	return intel_uc_fw_mark_load_failed(gsc_fw, err);
}