11bb76ff1Sjsg // SPDX-License-Identifier: MIT
21bb76ff1Sjsg /*
31bb76ff1Sjsg * Copyright © 2021-2022 Intel Corporation
41bb76ff1Sjsg */
51bb76ff1Sjsg
61bb76ff1Sjsg #include <linux/types.h>
71bb76ff1Sjsg
81bb76ff1Sjsg #include <drm/drm_print.h>
91bb76ff1Sjsg
101bb76ff1Sjsg #include "gt/intel_engine_regs.h"
111bb76ff1Sjsg #include "gt/intel_gt.h"
121bb76ff1Sjsg #include "gt/intel_gt_mcr.h"
131bb76ff1Sjsg #include "gt/intel_gt_regs.h"
141bb76ff1Sjsg #include "gt/intel_lrc.h"
151bb76ff1Sjsg #include "guc_capture_fwif.h"
161bb76ff1Sjsg #include "intel_guc_capture.h"
171bb76ff1Sjsg #include "intel_guc_fwif.h"
18*f005ef32Sjsg #include "intel_guc_print.h"
191bb76ff1Sjsg #include "i915_drv.h"
201bb76ff1Sjsg #include "i915_gpu_error.h"
211bb76ff1Sjsg #include "i915_irq.h"
221bb76ff1Sjsg #include "i915_memcpy.h"
231bb76ff1Sjsg #include "i915_reg.h"
241bb76ff1Sjsg
/*
 * Define all device tables of GuC error capture register lists
 * NOTE: For engine-registers, GuC only needs the register offsets
 * from the engine-mmio-base
 *
 * Each entry below expands to a __guc_mmio_reg_descr initializer of the
 * form { reg, flags, mask, name } — flags and mask default to 0 here and
 * are filled in later where needed (see __fill_ext_reg for steered regs).
 */

/* Global register common to all supported generations. */
#define COMMON_BASE_GLOBAL \
	{ FORCEWAKE_MT,             0,      0, "FORCEWAKE" }

/* Global registers shared by gen8 and later. */
#define COMMON_GEN8BASE_GLOBAL \
	{ ERROR_GEN6,               0,      0, "ERROR_GEN6" }, \
	{ DONE_REG,                 0,      0, "DONE_REG" }, \
	{ HSW_GTT_CACHE_EN,         0,      0, "HSW_GTT_CACHE_EN" }

/* Global fault-TLB registers specific to gen8 (gen12 uses its own below). */
#define GEN8_GLOBAL \
	{ GEN8_FAULT_TLB_DATA0,     0,      0, "GEN8_FAULT_TLB_DATA0" }, \
	{ GEN8_FAULT_TLB_DATA1,     0,      0, "GEN8_FAULT_TLB_DATA1" }

/* Gen12/XE_LP global fault and done registers. */
#define COMMON_GEN12BASE_GLOBAL \
	{ GEN12_FAULT_TLB_DATA0,    0,      0, "GEN12_FAULT_TLB_DATA0" }, \
	{ GEN12_FAULT_TLB_DATA1,    0,      0, "GEN12_FAULT_TLB_DATA1" }, \
	{ GEN12_AUX_ERR_DBG,        0,      0, "AUX_ERR_DBG" }, \
	{ GEN12_GAM_DONE,           0,      0, "GAM_DONE" }, \
	{ GEN12_RING_FAULT_REG,     0,      0, "FAULT_REG" }

/*
 * Per-engine-instance registers; offsets are relative to the engine mmio
 * base (hence the 0 argument to the RING_* helpers).
 */
#define COMMON_BASE_ENGINE_INSTANCE \
	{ RING_PSMI_CTL(0),         0,      0, "RC PSMI" }, \
	{ RING_ESR(0),              0,      0, "ESR" }, \
	{ RING_DMA_FADD(0),         0,      0, "RING_DMA_FADD_LDW" }, \
	{ RING_DMA_FADD_UDW(0),     0,      0, "RING_DMA_FADD_UDW" }, \
	{ RING_IPEIR(0),            0,      0, "IPEIR" }, \
	{ RING_IPEHR(0),            0,      0, "IPEHR" }, \
	{ RING_INSTPS(0),           0,      0, "INSTPS" }, \
	{ RING_BBADDR(0),           0,      0, "RING_BBADDR_LOW32" }, \
	{ RING_BBADDR_UDW(0),       0,      0, "RING_BBADDR_UP32" }, \
	{ RING_BBSTATE(0),          0,      0, "BB_STATE" }, \
	{ CCID(0),                  0,      0, "CCID" }, \
	{ RING_ACTHD(0),            0,      0, "ACTHD_LDW" }, \
	{ RING_ACTHD_UDW(0),        0,      0, "ACTHD_UDW" }, \
	{ RING_INSTPM(0),           0,      0, "INSTPM" }, \
	{ RING_INSTDONE(0),         0,      0, "INSTDONE" }, \
	{ RING_NOPID(0),            0,      0, "RING_NOPID" }, \
	{ RING_START(0),            0,      0, "START" }, \
	{ RING_HEAD(0),             0,      0, "HEAD" }, \
	{ RING_TAIL(0),             0,      0, "TAIL" }, \
	{ RING_CTL(0),              0,      0, "CTL" }, \
	{ RING_MI_MODE(0),          0,      0, "MODE" }, \
	{ RING_CONTEXT_CONTROL(0),  0,      0, "RING_CONTEXT_CONTROL" }, \
	{ RING_HWS_PGA(0),          0,      0, "HWS" }, \
	{ RING_MODE_GEN7(0),        0,      0, "GFX_MODE" }, \
	{ GEN8_RING_PDP_LDW(0, 0),  0,      0, "PDP0_LDW" }, \
	{ GEN8_RING_PDP_UDW(0, 0),  0,      0, "PDP0_UDW" }, \
	{ GEN8_RING_PDP_LDW(0, 1),  0,      0, "PDP1_LDW" }, \
	{ GEN8_RING_PDP_UDW(0, 1),  0,      0, "PDP1_UDW" }, \
	{ GEN8_RING_PDP_LDW(0, 2),  0,      0, "PDP2_LDW" }, \
	{ GEN8_RING_PDP_UDW(0, 2),  0,      0, "PDP2_UDW" }, \
	{ GEN8_RING_PDP_LDW(0, 3),  0,      0, "PDP3_LDW" }, \
	{ GEN8_RING_PDP_UDW(0, 3),  0,      0, "PDP3_UDW" }

/* Class-level register present on engines with execution units (EUs). */
#define COMMON_BASE_HAS_EU \
	{ EIR,                      0,      0, "EIR" }

/* Render-class INSTDONE register common to all generations. */
#define COMMON_BASE_RENDER \
	{ GEN7_SC_INSTDONE,         0,      0, "GEN7_SC_INSTDONE" }

/* Extra render-class INSTDONE registers for gen12/XE_LP. */
#define COMMON_GEN12BASE_RENDER \
	{ GEN12_SC_INSTDONE_EXTRA,  0,      0, "GEN12_SC_INSTDONE_EXTRA" }, \
	{ GEN12_SC_INSTDONE_EXTRA2, 0,      0, "GEN12_SC_INSTDONE_EXTRA2" }

/* Video-enhancement class SFC done registers for gen12/XE_LP. */
#define COMMON_GEN12BASE_VEC \
	{ GEN12_SFC_DONE(0),        0,      0, "SFC_DONE[0]" }, \
	{ GEN12_SFC_DONE(1),        0,      0, "SFC_DONE[1]" }, \
	{ GEN12_SFC_DONE(2),        0,      0, "SFC_DONE[2]" }, \
	{ GEN12_SFC_DONE(3),        0,      0, "SFC_DONE[3]" }
981bb76ff1Sjsg
/* XE_LP Global */
static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
	COMMON_BASE_GLOBAL,
	COMMON_GEN8BASE_GLOBAL,
	COMMON_GEN12BASE_GLOBAL,
};

/* XE_LP Render / Compute Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = {
	COMMON_BASE_HAS_EU,
	COMMON_BASE_RENDER,
	COMMON_GEN12BASE_RENDER,
};

/* GEN8+ Render / Compute Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_rc_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* GEN8+ Media Decode/Encode Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_vd_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* XE_LP Video Enhancement Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_vec_class_regs[] = {
	COMMON_GEN12BASE_VEC,
};

/* GEN8+ Video Enhancement Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_vec_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* GEN8+ Blitter Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_blt_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* XE_LP - GSC Per-Engine-Instance */
static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = {
	COMMON_BASE_ENGINE_INSTANCE,
};

/* GEN8 - Global */
static const struct __guc_mmio_reg_descr gen8_global_regs[] = {
	COMMON_BASE_GLOBAL,
	COMMON_GEN8BASE_GLOBAL,
	GEN8_GLOBAL,
};

/* GEN8 Render / Compute Per-Class (no gen12 extras) */
static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = {
	COMMON_BASE_HAS_EU,
	COMMON_BASE_RENDER,
};

/*
 * Empty list to prevent warnings about unknown class/instance types
 * as not all class/instance types have entries on all platforms.
 */
static const struct __guc_mmio_reg_descr empty_regs_list[] = {
};
1611bb76ff1Sjsg
#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
#define TO_GCAP_DEF_TYPE(x) (GUC_CAPTURE_LIST_TYPE_##x)
/*
 * Build a __guc_mmio_reg_descr_group initializer: register array, its
 * length, owner index, list type, engine class and a NULL extlist slot
 * (extlist is populated at runtime for steered registers).
 */
#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
	{ \
		regslist, \
		ARRAY_SIZE(regslist), \
		TO_GCAP_DEF_OWNER(regsowner), \
		TO_GCAP_DEF_TYPE(regstype), \
		class, \
		NULL, \
	}

/* List of lists */
/* gen8 device table: one global list plus class/instance lists per engine class. */
static const struct __guc_mmio_reg_descr_group gen8_lists[] = {
	MAKE_REGLIST(gen8_global_regs, PF, GLOBAL, 0),
	MAKE_REGLIST(gen8_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
	MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
	MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
	MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
	MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
	{}
};

/* XE_LP (gen12+) device table; terminated by an empty sentinel entry. */
static const struct __guc_mmio_reg_descr_group xe_lp_lists[] = {
	MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
	MAKE_REGLIST(xe_lp_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
	MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
	MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
	MAKE_REGLIST(xe_lp_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
	MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
	MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
	MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
	{}
};
2041bb76ff1Sjsg
2051bb76ff1Sjsg static const struct __guc_mmio_reg_descr_group *
guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group * reglists,u32 owner,u32 type,u32 id)2061bb76ff1Sjsg guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
2071bb76ff1Sjsg u32 owner, u32 type, u32 id)
2081bb76ff1Sjsg {
2091bb76ff1Sjsg int i;
2101bb76ff1Sjsg
2111bb76ff1Sjsg if (!reglists)
2121bb76ff1Sjsg return NULL;
2131bb76ff1Sjsg
2141bb76ff1Sjsg for (i = 0; reglists[i].list; ++i) {
2151bb76ff1Sjsg if (reglists[i].owner == owner && reglists[i].type == type &&
2161bb76ff1Sjsg (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
2171bb76ff1Sjsg return ®lists[i];
2181bb76ff1Sjsg }
2191bb76ff1Sjsg
2201bb76ff1Sjsg return NULL;
2211bb76ff1Sjsg }
2221bb76ff1Sjsg
2231bb76ff1Sjsg static struct __guc_mmio_reg_descr_group *
guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group * reglists,u32 owner,u32 type,u32 id)2241bb76ff1Sjsg guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists,
2251bb76ff1Sjsg u32 owner, u32 type, u32 id)
2261bb76ff1Sjsg {
2271bb76ff1Sjsg int i;
2281bb76ff1Sjsg
2291bb76ff1Sjsg if (!reglists)
2301bb76ff1Sjsg return NULL;
2311bb76ff1Sjsg
2321bb76ff1Sjsg for (i = 0; reglists[i].extlist; ++i) {
2331bb76ff1Sjsg if (reglists[i].owner == owner && reglists[i].type == type &&
2341bb76ff1Sjsg (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
2351bb76ff1Sjsg return ®lists[i];
2361bb76ff1Sjsg }
2371bb76ff1Sjsg
2381bb76ff1Sjsg return NULL;
2391bb76ff1Sjsg }
2401bb76ff1Sjsg
guc_capture_free_extlists(struct __guc_mmio_reg_descr_group * reglists)2411bb76ff1Sjsg static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists)
2421bb76ff1Sjsg {
2431bb76ff1Sjsg int i = 0;
2441bb76ff1Sjsg
2451bb76ff1Sjsg if (!reglists)
2461bb76ff1Sjsg return;
2471bb76ff1Sjsg
2481bb76ff1Sjsg while (reglists[i].extlist)
2491bb76ff1Sjsg kfree(reglists[i++].extlist);
2501bb76ff1Sjsg }
2511bb76ff1Sjsg
/* A steered (multicast) register: printable name plus its MCR register. */
struct __ext_steer_reg {
	const char *name;
	i915_mcr_reg_t reg;
};

/* Steered registers present on all gen8+ render/compute engines. */
static const struct __ext_steer_reg gen8_extregs[] = {
	{"GEN8_SAMPLER_INSTDONE", GEN8_SAMPLER_INSTDONE},
	{"GEN8_ROW_INSTDONE", GEN8_ROW_INSTDONE}
};

/* Additional steered register only on XeHPG (graphics 12.55+). */
static const struct __ext_steer_reg xehpg_extregs[] = {
	{"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG}
};
2651bb76ff1Sjsg
__fill_ext_reg(struct __guc_mmio_reg_descr * ext,const struct __ext_steer_reg * extlist,int slice_id,int subslice_id)2661bb76ff1Sjsg static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
2671bb76ff1Sjsg const struct __ext_steer_reg *extlist,
2681bb76ff1Sjsg int slice_id, int subslice_id)
2691bb76ff1Sjsg {
270*f005ef32Sjsg ext->reg = _MMIO(i915_mmio_reg_offset(extlist->reg));
2711bb76ff1Sjsg ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
2721bb76ff1Sjsg ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
2731bb76ff1Sjsg ext->regname = extlist->name;
2741bb76ff1Sjsg }
2751bb76ff1Sjsg
2761bb76ff1Sjsg static int
__alloc_ext_regs(struct __guc_mmio_reg_descr_group * newlist,const struct __guc_mmio_reg_descr_group * rootlist,int num_regs)2771bb76ff1Sjsg __alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist,
2781bb76ff1Sjsg const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
2791bb76ff1Sjsg {
2801bb76ff1Sjsg struct __guc_mmio_reg_descr *list;
2811bb76ff1Sjsg
2821bb76ff1Sjsg list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
2831bb76ff1Sjsg if (!list)
2841bb76ff1Sjsg return -ENOMEM;
2851bb76ff1Sjsg
2861bb76ff1Sjsg newlist->extlist = list;
2871bb76ff1Sjsg newlist->num_regs = num_regs;
2881bb76ff1Sjsg newlist->owner = rootlist->owner;
2891bb76ff1Sjsg newlist->engine = rootlist->engine;
2901bb76ff1Sjsg newlist->type = rootlist->type;
2911bb76ff1Sjsg
2921bb76ff1Sjsg return 0;
2931bb76ff1Sjsg }
2941bb76ff1Sjsg
/*
 * guc_capture_alloc_steered_lists - build the runtime extension list of
 * steered (slice/subslice) registers for the render/compute class and
 * attach it to guc->capture->extlists.
 *
 * No-op if the device table has no render/compute class list, if the
 * extlists were already allocated, or on any allocation failure (capture
 * then simply proceeds without steered registers).
 */
static void
guc_capture_alloc_steered_lists(struct intel_guc *guc,
				const struct __guc_mmio_reg_descr_group *lists)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0;
	const struct __guc_mmio_reg_descr_group *list;
	struct __guc_mmio_reg_descr_group *extlists;
	struct __guc_mmio_reg_descr *extarray;
	bool has_xehpg_extregs;

	/* steered registers currently only exist for the render-class */
	list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
					GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
					GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE);
	/* skip if extlists was previously allocated */
	if (!list || guc->capture->extlists)
		return;

	/* XeHPG (12.55+) adds one more steered register on top of the gen8 set */
	has_xehpg_extregs = GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55);

	num_steer_regs = ARRAY_SIZE(gen8_extregs);
	if (has_xehpg_extregs)
		num_steer_regs += ARRAY_SIZE(xehpg_extregs);

	/* one full set of steered registers per fused-on slice/subslice pair */
	for_each_ss_steering(iter, gt, slice, subslice)
		num_tot_regs += num_steer_regs;

	if (!num_tot_regs)
		return;

	/* allocate an extra for an end marker */
	extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
	if (!extlists)
		return;

	if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
		kfree(extlists);
		return;
	}

	/*
	 * Fill the flat extlist array in the same slice/subslice order used
	 * for counting above, so exactly num_tot_regs entries are written.
	 */
	extarray = extlists[0].extlist;
	for_each_ss_steering(iter, gt, slice, subslice) {
		for (i = 0; i < ARRAY_SIZE(gen8_extregs); ++i) {
			__fill_ext_reg(extarray, &gen8_extregs[i], slice, subslice);
			++extarray;
		}

		if (has_xehpg_extregs) {
			for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
				__fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
				++extarray;
			}
		}
	}

	guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs);
	guc->capture->extlists = extlists;
}
3541bb76ff1Sjsg
3551bb76ff1Sjsg static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct intel_guc * guc)3561bb76ff1Sjsg guc_capture_get_device_reglist(struct intel_guc *guc)
3571bb76ff1Sjsg {
3581bb76ff1Sjsg struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
359*f005ef32Sjsg const struct __guc_mmio_reg_descr_group *lists;
3601bb76ff1Sjsg
361*f005ef32Sjsg if (GRAPHICS_VER(i915) >= 12)
362*f005ef32Sjsg lists = xe_lp_lists;
363*f005ef32Sjsg else
364*f005ef32Sjsg lists = gen8_lists;
365*f005ef32Sjsg
3661bb76ff1Sjsg /*
3671bb76ff1Sjsg * For certain engine classes, there are slice and subslice
3681bb76ff1Sjsg * level registers requiring steering. We allocate and populate
3691bb76ff1Sjsg * these at init time based on hw config add it as an extension
3701bb76ff1Sjsg * list at the end of the pre-populated render list.
3711bb76ff1Sjsg */
372*f005ef32Sjsg guc_capture_alloc_steered_lists(guc, lists);
3731bb76ff1Sjsg
374*f005ef32Sjsg return lists;
3751bb76ff1Sjsg }
3761bb76ff1Sjsg
3771bb76ff1Sjsg static const char *
__stringify_type(u32 type)3781bb76ff1Sjsg __stringify_type(u32 type)
3791bb76ff1Sjsg {
3801bb76ff1Sjsg switch (type) {
3811bb76ff1Sjsg case GUC_CAPTURE_LIST_TYPE_GLOBAL:
3821bb76ff1Sjsg return "Global";
3831bb76ff1Sjsg case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
3841bb76ff1Sjsg return "Class";
3851bb76ff1Sjsg case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
3861bb76ff1Sjsg return "Instance";
3871bb76ff1Sjsg default:
3881bb76ff1Sjsg break;
3891bb76ff1Sjsg }
3901bb76ff1Sjsg
3911bb76ff1Sjsg return "unknown";
3921bb76ff1Sjsg }
3931bb76ff1Sjsg
3941bb76ff1Sjsg static const char *
__stringify_engclass(u32 class)3951bb76ff1Sjsg __stringify_engclass(u32 class)
3961bb76ff1Sjsg {
3971bb76ff1Sjsg switch (class) {
398*f005ef32Sjsg case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
399*f005ef32Sjsg return "Render/Compute";
400*f005ef32Sjsg case GUC_CAPTURE_LIST_CLASS_VIDEO:
4011bb76ff1Sjsg return "Video";
402*f005ef32Sjsg case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
4031bb76ff1Sjsg return "VideoEnhance";
404*f005ef32Sjsg case GUC_CAPTURE_LIST_CLASS_BLITTER:
4051bb76ff1Sjsg return "Blitter";
406*f005ef32Sjsg case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
407*f005ef32Sjsg return "GSC-Other";
4081bb76ff1Sjsg default:
4091bb76ff1Sjsg break;
4101bb76ff1Sjsg }
4111bb76ff1Sjsg
4121bb76ff1Sjsg return "unknown";
4131bb76ff1Sjsg }
4141bb76ff1Sjsg
/*
 * guc_capture_list_init - serialize one matched register list (plus any
 * extension list) into the guc_mmio_reg array @ptr handed to GuC via ADS.
 *
 * Each entry's value is pre-filled with the 0xDEADF00D poison so unwritten
 * slots are recognizable; GuC overwrites values at capture time.
 *
 * Returns 0 on success, -ENODEV if no reglists exist for this device,
 * -ENODATA if no list matches owner/type/classid.
 */
static int
guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
		      struct guc_mmio_reg *ptr, u16 num_entries)
{
	u32 i = 0, j = 0;
	const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
	struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
	const struct __guc_mmio_reg_descr_group *match;
	struct __guc_mmio_reg_descr_group *matchext;

	if (!reglists)
		return -ENODEV;

	match = guc_capture_get_one_list(reglists, owner, type, classid);
	if (!match)
		return -ENODATA;

	/* copy the static list, bounded by both the buffer and the list size */
	for (i = 0; i < num_entries && i < match->num_regs; ++i) {
		ptr[i].offset = match->list[i].reg.reg;
		ptr[i].value = 0xDEADF00D;
		ptr[i].flags = match->list[i].flags;
		ptr[i].mask = match->list[i].mask;
	}

	/*
	 * Append the runtime extension (steered) registers, if any, directly
	 * after the static entries: i continues in @ptr, j walks the extlist.
	 */
	matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid);
	if (matchext) {
		for (i = match->num_regs, j = 0; i < num_entries &&
		     i < (match->num_regs + matchext->num_regs) &&
			j < matchext->num_regs; ++i, ++j) {
			ptr[i].offset = matchext->extlist[j].reg.reg;
			ptr[i].value = 0xDEADF00D;
			ptr[i].flags = matchext->extlist[j].flags;
			ptr[i].mask = matchext->extlist[j].mask;
		}
	}
	if (i < num_entries)
		guc_dbg(guc, "Got short capture reglist init: %d out %d.\n", i, num_entries);

	return 0;
}
4551bb76ff1Sjsg
4561bb76ff1Sjsg static int
guc_cap_list_num_regs(struct intel_guc_state_capture * gc,u32 owner,u32 type,u32 classid)4571bb76ff1Sjsg guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u32 classid)
4581bb76ff1Sjsg {
4591bb76ff1Sjsg const struct __guc_mmio_reg_descr_group *match;
4601bb76ff1Sjsg struct __guc_mmio_reg_descr_group *matchext;
4611bb76ff1Sjsg int num_regs;
4621bb76ff1Sjsg
4631bb76ff1Sjsg match = guc_capture_get_one_list(gc->reglists, owner, type, classid);
4641bb76ff1Sjsg if (!match)
4651bb76ff1Sjsg return 0;
4661bb76ff1Sjsg
4671bb76ff1Sjsg num_regs = match->num_regs;
4681bb76ff1Sjsg
4691bb76ff1Sjsg matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid);
4701bb76ff1Sjsg if (matchext)
4711bb76ff1Sjsg num_regs += matchext->num_regs;
4721bb76ff1Sjsg
4731bb76ff1Sjsg return num_regs;
4741bb76ff1Sjsg }
4751bb76ff1Sjsg
/*
 * guc_capture_getlistsize - compute (page-aligned) byte size of the
 * serialized capture list for owner/type/classid.
 *
 * If the ADS cache already holds an answer for this triple, return the
 * cached size/status. @is_purpose_est suppresses the missing-reglist
 * warnings when the caller is only estimating buffer sizes (see
 * guc_capture_output_min_size_est).
 *
 * Returns 0 with *size set, -ENODEV if the device has no reglists,
 * -ENODATA if the list is missing or intentionally empty.
 */
static int
guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
			size_t *size, bool is_purpose_est)
{
	struct intel_guc_state_capture *gc = guc->capture;
	struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
	int num_regs;

	if (!gc->reglists) {
		guc_warn(guc, "No capture reglist for this device\n");
		return -ENODEV;
	}

	/* fast path: size/status previously computed and cached */
	if (cache->is_valid) {
		*size = cache->size;
		return cache->status;
	}

	/* warn (once per real query) when a PF list is genuinely absent */
	if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
	    !guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
		if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
			guc_warn(guc, "Missing capture reglist: global!\n");
		else
			guc_warn(guc, "Missing capture reglist: %s(%u):%s(%u)!\n",
				 __stringify_type(type), type,
				 __stringify_engclass(classid), classid);
		return -ENODATA;
	}

	num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
	/* intentional empty lists can exist depending on hw config */
	if (!num_regs)
		return -ENODATA;

	/* header + one guc_mmio_reg per register, rounded up to a page */
	if (size)
		*size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
				   (num_regs * sizeof(struct guc_mmio_reg)));

	return 0;
}
5161bb76ff1Sjsg
/*
 * intel_guc_capture_getlistsize - public wrapper around
 * guc_capture_getlistsize() with is_purpose_est = false, i.e. missing
 * PF reglists are warned about. Used by the ADS module when sizing the
 * capture-list allocations.
 */
int
intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
			      size_t *size)
{
	return guc_capture_getlistsize(guc, owner, type, classid, size, false);
}

static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);
5251bb76ff1Sjsg
/*
 * intel_guc_capture_getlist - return (and lazily build + cache) the
 * serialized capture list buffer for owner/type/classid, for the ADS.
 *
 * On success *outptr points at a kzalloc'd buffer owned by the cache
 * (callers must not free it). Failures from the size computation are
 * cached too, so repeated queries return quickly with the same status.
 *
 * Returns 0 on success, -ENODEV with no reglists, -ENOMEM on allocation
 * failure, or the cached negative status from the size computation.
 */
int
intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
			  void **outptr)
{
	struct intel_guc_state_capture *gc = guc->capture;
	struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
	struct guc_debug_capture_list *listnode;
	int ret, num_regs;
	u8 *caplist, *tmp;
	size_t size = 0;

	if (!gc->reglists)
		return -ENODEV;

	/* fast path: buffer (or failure status) previously cached */
	if (cache->is_valid) {
		*outptr = cache->ptr;
		return cache->status;
	}

	/*
	 * ADS population of input registers is a good
	 * time to pre-allocate cachelist output nodes
	 */
	guc_capture_create_prealloc_nodes(guc);

	ret = intel_guc_capture_getlistsize(guc, owner, type, classid, &size);
	if (ret) {
		/* cache the failure so we don't recompute it every query */
		cache->is_valid = true;
		cache->ptr = NULL;
		cache->size = 0;
		cache->status = ret;
		return ret;
	}

	caplist = kzalloc(size, GFP_KERNEL);
	if (!caplist) {
		guc_dbg(guc, "Failed to alloc cached register capture list");
		return -ENOMEM;
	}

	/* populate capture list header */
	tmp = caplist;
	num_regs = guc_cap_list_num_regs(guc->capture, owner, type, classid);
	listnode = (struct guc_debug_capture_list *)tmp;
	listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);

	/* populate list of register descriptor */
	tmp += sizeof(struct guc_debug_capture_list);
	guc_capture_list_init(guc, owner, type, classid, (struct guc_mmio_reg *)tmp, num_regs);

	/* cache this list */
	cache->is_valid = true;
	cache->ptr = caplist;
	cache->size = size;
	cache->status = 0;

	*outptr = caplist;

	return 0;
}
5861bb76ff1Sjsg
5871bb76ff1Sjsg int
intel_guc_capture_getnullheader(struct intel_guc * guc,void ** outptr,size_t * size)5881bb76ff1Sjsg intel_guc_capture_getnullheader(struct intel_guc *guc,
5891bb76ff1Sjsg void **outptr, size_t *size)
5901bb76ff1Sjsg {
5911bb76ff1Sjsg struct intel_guc_state_capture *gc = guc->capture;
5921bb76ff1Sjsg int tmp = sizeof(u32) * 4;
5931bb76ff1Sjsg void *null_header;
5941bb76ff1Sjsg
5951bb76ff1Sjsg if (gc->ads_null_cache) {
5961bb76ff1Sjsg *outptr = gc->ads_null_cache;
5971bb76ff1Sjsg *size = tmp;
5981bb76ff1Sjsg return 0;
5991bb76ff1Sjsg }
6001bb76ff1Sjsg
6011bb76ff1Sjsg null_header = kzalloc(tmp, GFP_KERNEL);
6021bb76ff1Sjsg if (!null_header) {
603*f005ef32Sjsg guc_dbg(guc, "Failed to alloc cached register capture null list");
6041bb76ff1Sjsg return -ENOMEM;
6051bb76ff1Sjsg }
6061bb76ff1Sjsg
6071bb76ff1Sjsg gc->ads_null_cache = null_header;
6081bb76ff1Sjsg *outptr = null_header;
6091bb76ff1Sjsg *size = tmp;
6101bb76ff1Sjsg
6111bb76ff1Sjsg return 0;
6121bb76ff1Sjsg }
6131bb76ff1Sjsg
/*
 * guc_capture_output_min_size_est - worst-case estimate of the GuC
 * capture output-buffer space needed if every engine instance failed
 * back-to-back. Used only to sanity-check the log-section size in
 * check_guc_capture_size().
 *
 * Returns the estimated byte count, or -ENODEV if capture is not
 * initialized.
 */
static int
guc_capture_output_min_size_est(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int worst_min_size = 0;
	size_t tmp = 0;

	if (!guc->capture)
		return -ENODEV;

	/*
	 * If every single engine-instance suffered a failure in quick succession but
	 * were all unrelated, then a burst of multiple error-capture events would dump
	 * registers for every one engine instance, one at a time. In this case, GuC
	 * would even dump the global-registers repeatedly.
	 *
	 * For each engine instance, there would be 1 x guc_state_capture_group_t output
	 * followed by 3 x guc_state_capture_t lists. The latter is how the register
	 * dumps are split across different register types (where the '3' are global vs class
	 * vs instance).
	 */
	for_each_engine(engine, gt, id) {
		worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
					 (3 * sizeof(struct guc_state_capture_header_t));

		/* is_purpose_est = true: don't warn about missing lists here */
		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true))
			worst_min_size += tmp;

		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
					     engine->class, &tmp, true)) {
			worst_min_size += tmp;
		}
		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
					     engine->class, &tmp, true)) {
			worst_min_size += tmp;
		}
	}

	return worst_min_size;
}
6561bb76ff1Sjsg
6571bb76ff1Sjsg /*
6581bb76ff1Sjsg * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
6591bb76ff1Sjsg * before the i915 can read the data out and process it
6601bb76ff1Sjsg */
6611bb76ff1Sjsg #define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
6621bb76ff1Sjsg
check_guc_capture_size(struct intel_guc * guc)6631bb76ff1Sjsg static void check_guc_capture_size(struct intel_guc *guc)
6641bb76ff1Sjsg {
6651bb76ff1Sjsg int min_size = guc_capture_output_min_size_est(guc);
6661bb76ff1Sjsg int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
6671bb76ff1Sjsg u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
6681bb76ff1Sjsg
6691bb76ff1Sjsg /*
6701bb76ff1Sjsg * NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB)
6711bb76ff1Sjsg * Additionally, its based on space needed to fit all engines getting reset at once
6721bb76ff1Sjsg * within the same G2H handler task slot. This is very unlikely. However, if GuC really
6731bb76ff1Sjsg * does run out of space for whatever reason, we will see an separate warning message
6741bb76ff1Sjsg * when processing the G2H event capture-notification, search for:
6751bb76ff1Sjsg * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
6761bb76ff1Sjsg */
6771bb76ff1Sjsg if (min_size < 0)
678*f005ef32Sjsg guc_warn(guc, "Failed to calculate error state capture buffer minimum size: %d!\n",
6791bb76ff1Sjsg min_size);
6801bb76ff1Sjsg else if (min_size > buffer_size)
681*f005ef32Sjsg guc_warn(guc, "Error state capture buffer maybe small: %d < %d\n",
6821bb76ff1Sjsg buffer_size, min_size);
6831bb76ff1Sjsg else if (spare_size > buffer_size)
684*f005ef32Sjsg guc_dbg(guc, "Error state capture buffer lacks spare size: %d < %d (min = %d)\n",
6851bb76ff1Sjsg buffer_size, spare_size, min_size);
6861bb76ff1Sjsg }
6871bb76ff1Sjsg
6881bb76ff1Sjsg /*
6891bb76ff1Sjsg * KMD Init time flows:
6901bb76ff1Sjsg * --------------------
6911bb76ff1Sjsg * --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
6921bb76ff1Sjsg * intel_guc_ads acquires the register lists by calling
6931bb76ff1Sjsg * intel_guc_capture_list_size and intel_guc_capture_list_get 'n' times,
6941bb76ff1Sjsg * where n = 1 for global-reg-list +
6951bb76ff1Sjsg * num_engine_classes for class-reg-list +
6961bb76ff1Sjsg * num_engine_classes for instance-reg-list
6971bb76ff1Sjsg * (since all instances of the same engine-class type
6981bb76ff1Sjsg * have an identical engine-instance register-list).
6991bb76ff1Sjsg * ADS module also calls separately for PF vs VF.
7001bb76ff1Sjsg *
7011bb76ff1Sjsg * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
 * Size = #define CAPTURE_BUFFER_SIZE (warns if too small)
7031bb76ff1Sjsg * Note2: 'x 3' to hold multiple capture groups
7041bb76ff1Sjsg *
7051bb76ff1Sjsg * GUC Runtime notify capture:
7061bb76ff1Sjsg * --------------------------
7071bb76ff1Sjsg * --> G2H STATE_CAPTURE_NOTIFICATION
7081bb76ff1Sjsg * L--> intel_guc_capture_process
7091bb76ff1Sjsg * L--> Loop through B (head..tail) and for each engine instance's
7101bb76ff1Sjsg * err-state-captured register-list we find, we alloc 'C':
7111bb76ff1Sjsg * --> alloc C: A capture-output-node structure that includes misc capture info along
7121bb76ff1Sjsg * with 3 register list dumps (global, engine-class and engine-instance)
7131bb76ff1Sjsg * This node is created from a pre-allocated list of blank nodes in
7141bb76ff1Sjsg * guc->capture->cachelist and populated with the error-capture
7151bb76ff1Sjsg * data from GuC and then it's added into guc->capture->outlist linked
7161bb76ff1Sjsg * list. This list is used for matchup and printout by i915_gpu_coredump
7171bb76ff1Sjsg * and err_print_gt, (when user invokes the error capture sysfs).
7181bb76ff1Sjsg *
7191bb76ff1Sjsg * GUC --> notify context reset:
7201bb76ff1Sjsg * -----------------------------
7211bb76ff1Sjsg * --> G2H CONTEXT RESET
7221bb76ff1Sjsg * L--> guc_handle_context_reset --> i915_capture_error_state
7231bb76ff1Sjsg * L--> i915_gpu_coredump(..IS_GUC_CAPTURE) --> gt_record_engines
7241bb76ff1Sjsg * --> capture_engine(..IS_GUC_CAPTURE)
7251bb76ff1Sjsg * L--> intel_guc_capture_get_matching_node is where
7261bb76ff1Sjsg * detach C from internal linked list and add it into
7271bb76ff1Sjsg * intel_engine_coredump struct (if the context and
7281bb76ff1Sjsg * engine of the event notification matches a node
7291bb76ff1Sjsg * in the link list).
7301bb76ff1Sjsg *
7311bb76ff1Sjsg * User Sysfs / Debugfs
7321bb76ff1Sjsg * --------------------
7331bb76ff1Sjsg * --> i915_gpu_coredump_copy_to_buffer->
7341bb76ff1Sjsg * L--> err_print_to_sgl --> err_print_gt
7351bb76ff1Sjsg * L--> error_print_guc_captures
7361bb76ff1Sjsg * L--> intel_guc_capture_print_node prints the
7371bb76ff1Sjsg * register lists values of the attached node
7381bb76ff1Sjsg * on the error-engine-dump being reported.
7391bb76ff1Sjsg * L--> i915_reset_error_state ... -->__i915_gpu_coredump_free
7401bb76ff1Sjsg * L--> ... cleanup_gt -->
7411bb76ff1Sjsg * L--> intel_guc_capture_free_node returns the
7421bb76ff1Sjsg * capture-output-node back to the internal
7431bb76ff1Sjsg * cachelist for reuse.
7441bb76ff1Sjsg *
7451bb76ff1Sjsg */
7461bb76ff1Sjsg
guc_capture_buf_cnt(struct __guc_capture_bufstate * buf)7471bb76ff1Sjsg static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf)
7481bb76ff1Sjsg {
7491bb76ff1Sjsg if (buf->wr >= buf->rd)
7501bb76ff1Sjsg return (buf->wr - buf->rd);
7511bb76ff1Sjsg return (buf->size - buf->rd) + buf->wr;
7521bb76ff1Sjsg }
7531bb76ff1Sjsg
guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate * buf)7541bb76ff1Sjsg static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf)
7551bb76ff1Sjsg {
7561bb76ff1Sjsg if (buf->rd > buf->wr)
7571bb76ff1Sjsg return (buf->size - buf->rd);
7581bb76ff1Sjsg return (buf->wr - buf->rd);
7591bb76ff1Sjsg }
7601bb76ff1Sjsg
7611bb76ff1Sjsg /*
7621bb76ff1Sjsg * GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
7631bb76ff1Sjsg *
7641bb76ff1Sjsg * The GuC Log buffer region for error-capture is managed like a ring buffer.
7651bb76ff1Sjsg * The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
7661bb76ff1Sjsg * Additionally, as per the current and foreseeable future, all packed error-
7671bb76ff1Sjsg * capture output structures are dword aligned.
7681bb76ff1Sjsg *
7691bb76ff1Sjsg * That said, if the GuC firmware is in the midst of writing a structure that is larger
7701bb76ff1Sjsg * than one dword but the tail end of the err-capture buffer-region has lesser space left,
7711bb76ff1Sjsg * we would need to extract that structure one dword at a time straddled across the end,
7721bb76ff1Sjsg * onto the start of the ring.
7731bb76ff1Sjsg *
7741bb76ff1Sjsg * Below function, guc_capture_log_remove_dw is a helper for that. All callers of this
7751bb76ff1Sjsg * function would typically do a straight-up memcpy from the ring contents and will only
7761bb76ff1Sjsg * call this helper if their structure-extraction is straddling across the end of the
7771bb76ff1Sjsg * ring. GuC firmware does not add any padding. The reason for the no-padding is to ease
7781bb76ff1Sjsg * scalability for future expansion of output data types without requiring a redesign
7791bb76ff1Sjsg * of the flow controls.
7801bb76ff1Sjsg */
7811bb76ff1Sjsg static int
guc_capture_log_remove_dw(struct intel_guc * guc,struct __guc_capture_bufstate * buf,u32 * dw)7821bb76ff1Sjsg guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
7831bb76ff1Sjsg u32 *dw)
7841bb76ff1Sjsg {
7851bb76ff1Sjsg int tries = 2;
7861bb76ff1Sjsg int avail = 0;
7871bb76ff1Sjsg u32 *src_data;
7881bb76ff1Sjsg
7891bb76ff1Sjsg if (!guc_capture_buf_cnt(buf))
7901bb76ff1Sjsg return 0;
7911bb76ff1Sjsg
7921bb76ff1Sjsg while (tries--) {
7931bb76ff1Sjsg avail = guc_capture_buf_cnt_to_end(buf);
7941bb76ff1Sjsg if (avail >= sizeof(u32)) {
7951bb76ff1Sjsg src_data = (u32 *)(buf->data + buf->rd);
7961bb76ff1Sjsg *dw = *src_data;
7971bb76ff1Sjsg buf->rd += 4;
7981bb76ff1Sjsg return 4;
7991bb76ff1Sjsg }
8001bb76ff1Sjsg if (avail)
801*f005ef32Sjsg guc_dbg(guc, "Register capture log not dword aligned, skipping.\n");
8021bb76ff1Sjsg buf->rd = 0;
8031bb76ff1Sjsg }
8041bb76ff1Sjsg
8051bb76ff1Sjsg return 0;
8061bb76ff1Sjsg }
8071bb76ff1Sjsg
8081bb76ff1Sjsg static bool
guc_capture_data_extracted(struct __guc_capture_bufstate * b,int size,void * dest)8091bb76ff1Sjsg guc_capture_data_extracted(struct __guc_capture_bufstate *b,
8101bb76ff1Sjsg int size, void *dest)
8111bb76ff1Sjsg {
8121bb76ff1Sjsg if (guc_capture_buf_cnt_to_end(b) >= size) {
8131bb76ff1Sjsg memcpy(dest, (b->data + b->rd), size);
8141bb76ff1Sjsg b->rd += size;
8151bb76ff1Sjsg return true;
8161bb76ff1Sjsg }
8171bb76ff1Sjsg return false;
8181bb76ff1Sjsg }
8191bb76ff1Sjsg
8201bb76ff1Sjsg static int
guc_capture_log_get_group_hdr(struct intel_guc * guc,struct __guc_capture_bufstate * buf,struct guc_state_capture_group_header_t * ghdr)8211bb76ff1Sjsg guc_capture_log_get_group_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
8221bb76ff1Sjsg struct guc_state_capture_group_header_t *ghdr)
8231bb76ff1Sjsg {
8241bb76ff1Sjsg int read = 0;
8251bb76ff1Sjsg int fullsize = sizeof(struct guc_state_capture_group_header_t);
8261bb76ff1Sjsg
8271bb76ff1Sjsg if (fullsize > guc_capture_buf_cnt(buf))
8281bb76ff1Sjsg return -1;
8291bb76ff1Sjsg
8301bb76ff1Sjsg if (guc_capture_data_extracted(buf, fullsize, (void *)ghdr))
8311bb76ff1Sjsg return 0;
8321bb76ff1Sjsg
8331bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, &ghdr->owner);
8341bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, &ghdr->info);
8351bb76ff1Sjsg if (read != fullsize)
8361bb76ff1Sjsg return -1;
8371bb76ff1Sjsg
8381bb76ff1Sjsg return 0;
8391bb76ff1Sjsg }
8401bb76ff1Sjsg
8411bb76ff1Sjsg static int
guc_capture_log_get_data_hdr(struct intel_guc * guc,struct __guc_capture_bufstate * buf,struct guc_state_capture_header_t * hdr)8421bb76ff1Sjsg guc_capture_log_get_data_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
8431bb76ff1Sjsg struct guc_state_capture_header_t *hdr)
8441bb76ff1Sjsg {
8451bb76ff1Sjsg int read = 0;
8461bb76ff1Sjsg int fullsize = sizeof(struct guc_state_capture_header_t);
8471bb76ff1Sjsg
8481bb76ff1Sjsg if (fullsize > guc_capture_buf_cnt(buf))
8491bb76ff1Sjsg return -1;
8501bb76ff1Sjsg
8511bb76ff1Sjsg if (guc_capture_data_extracted(buf, fullsize, (void *)hdr))
8521bb76ff1Sjsg return 0;
8531bb76ff1Sjsg
8541bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, &hdr->owner);
8551bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, &hdr->info);
8561bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, &hdr->lrca);
8571bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, &hdr->guc_id);
8581bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, &hdr->num_mmios);
8591bb76ff1Sjsg if (read != fullsize)
8601bb76ff1Sjsg return -1;
8611bb76ff1Sjsg
8621bb76ff1Sjsg return 0;
8631bb76ff1Sjsg }
8641bb76ff1Sjsg
8651bb76ff1Sjsg static int
guc_capture_log_get_register(struct intel_guc * guc,struct __guc_capture_bufstate * buf,struct guc_mmio_reg * reg)8661bb76ff1Sjsg guc_capture_log_get_register(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
8671bb76ff1Sjsg struct guc_mmio_reg *reg)
8681bb76ff1Sjsg {
8691bb76ff1Sjsg int read = 0;
8701bb76ff1Sjsg int fullsize = sizeof(struct guc_mmio_reg);
8711bb76ff1Sjsg
8721bb76ff1Sjsg if (fullsize > guc_capture_buf_cnt(buf))
8731bb76ff1Sjsg return -1;
8741bb76ff1Sjsg
8751bb76ff1Sjsg if (guc_capture_data_extracted(buf, fullsize, (void *)reg))
8761bb76ff1Sjsg return 0;
8771bb76ff1Sjsg
8781bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, ®->offset);
8791bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, ®->value);
8801bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, ®->flags);
8811bb76ff1Sjsg read += guc_capture_log_remove_dw(guc, buf, ®->mask);
8821bb76ff1Sjsg if (read != fullsize)
8831bb76ff1Sjsg return -1;
8841bb76ff1Sjsg
8851bb76ff1Sjsg return 0;
8861bb76ff1Sjsg }
8871bb76ff1Sjsg
8881bb76ff1Sjsg static void
guc_capture_delete_one_node(struct intel_guc * guc,struct __guc_capture_parsed_output * node)8891bb76ff1Sjsg guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
8901bb76ff1Sjsg {
8911bb76ff1Sjsg int i;
8921bb76ff1Sjsg
8931bb76ff1Sjsg for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
8941bb76ff1Sjsg kfree(node->reginfo[i].regs);
8951bb76ff1Sjsg list_del(&node->link);
8961bb76ff1Sjsg kfree(node);
8971bb76ff1Sjsg }
8981bb76ff1Sjsg
8991bb76ff1Sjsg static void
guc_capture_delete_prealloc_nodes(struct intel_guc * guc)9001bb76ff1Sjsg guc_capture_delete_prealloc_nodes(struct intel_guc *guc)
9011bb76ff1Sjsg {
9021bb76ff1Sjsg struct __guc_capture_parsed_output *n, *ntmp;
9031bb76ff1Sjsg
9041bb76ff1Sjsg /*
9051bb76ff1Sjsg * NOTE: At the end of driver operation, we must assume that we
9061bb76ff1Sjsg * have prealloc nodes in both the cachelist as well as outlist
9071bb76ff1Sjsg * if unclaimed error capture events occurred prior to shutdown.
9081bb76ff1Sjsg */
9091bb76ff1Sjsg list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link)
9101bb76ff1Sjsg guc_capture_delete_one_node(guc, n);
9111bb76ff1Sjsg
9121bb76ff1Sjsg list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link)
9131bb76ff1Sjsg guc_capture_delete_one_node(guc, n);
9141bb76ff1Sjsg }
9151bb76ff1Sjsg
9161bb76ff1Sjsg static void
guc_capture_add_node_to_list(struct __guc_capture_parsed_output * node,struct list_head * list)9171bb76ff1Sjsg guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
9181bb76ff1Sjsg struct list_head *list)
9191bb76ff1Sjsg {
9201bb76ff1Sjsg list_add_tail(&node->link, list);
9211bb76ff1Sjsg }
9221bb76ff1Sjsg
9231bb76ff1Sjsg static void
guc_capture_add_node_to_outlist(struct intel_guc_state_capture * gc,struct __guc_capture_parsed_output * node)9241bb76ff1Sjsg guc_capture_add_node_to_outlist(struct intel_guc_state_capture *gc,
9251bb76ff1Sjsg struct __guc_capture_parsed_output *node)
9261bb76ff1Sjsg {
9271bb76ff1Sjsg guc_capture_add_node_to_list(node, &gc->outlist);
9281bb76ff1Sjsg }
9291bb76ff1Sjsg
9301bb76ff1Sjsg static void
guc_capture_add_node_to_cachelist(struct intel_guc_state_capture * gc,struct __guc_capture_parsed_output * node)9311bb76ff1Sjsg guc_capture_add_node_to_cachelist(struct intel_guc_state_capture *gc,
9321bb76ff1Sjsg struct __guc_capture_parsed_output *node)
9331bb76ff1Sjsg {
9341bb76ff1Sjsg guc_capture_add_node_to_list(node, &gc->cachelist);
9351bb76ff1Sjsg }
9361bb76ff1Sjsg
9371bb76ff1Sjsg static void
guc_capture_init_node(struct intel_guc * guc,struct __guc_capture_parsed_output * node)9381bb76ff1Sjsg guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
9391bb76ff1Sjsg {
9401bb76ff1Sjsg struct guc_mmio_reg *tmp[GUC_CAPTURE_LIST_TYPE_MAX];
9411bb76ff1Sjsg int i;
9421bb76ff1Sjsg
9431bb76ff1Sjsg for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
9441bb76ff1Sjsg tmp[i] = node->reginfo[i].regs;
9451bb76ff1Sjsg memset(tmp[i], 0, sizeof(struct guc_mmio_reg) *
9461bb76ff1Sjsg guc->capture->max_mmio_per_node);
9471bb76ff1Sjsg }
9481bb76ff1Sjsg memset(node, 0, sizeof(*node));
9491bb76ff1Sjsg for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
9501bb76ff1Sjsg node->reginfo[i].regs = tmp[i];
9511bb76ff1Sjsg
9521bb76ff1Sjsg INIT_LIST_HEAD(&node->link);
9531bb76ff1Sjsg }
9541bb76ff1Sjsg
9551bb76ff1Sjsg static struct __guc_capture_parsed_output *
guc_capture_get_prealloc_node(struct intel_guc * guc)9561bb76ff1Sjsg guc_capture_get_prealloc_node(struct intel_guc *guc)
9571bb76ff1Sjsg {
9581bb76ff1Sjsg struct __guc_capture_parsed_output *found = NULL;
9591bb76ff1Sjsg
9601bb76ff1Sjsg if (!list_empty(&guc->capture->cachelist)) {
9611bb76ff1Sjsg struct __guc_capture_parsed_output *n, *ntmp;
9621bb76ff1Sjsg
9631bb76ff1Sjsg /* get first avail node from the cache list */
9641bb76ff1Sjsg list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) {
9651bb76ff1Sjsg found = n;
9661bb76ff1Sjsg list_del(&n->link);
9671bb76ff1Sjsg break;
9681bb76ff1Sjsg }
9691bb76ff1Sjsg } else {
9701bb76ff1Sjsg struct __guc_capture_parsed_output *n, *ntmp;
9711bb76ff1Sjsg
9721bb76ff1Sjsg /* traverse down and steal back the oldest node already allocated */
9731bb76ff1Sjsg list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
9741bb76ff1Sjsg found = n;
9751bb76ff1Sjsg }
9761bb76ff1Sjsg if (found)
9771bb76ff1Sjsg list_del(&found->link);
9781bb76ff1Sjsg }
9791bb76ff1Sjsg if (found)
9801bb76ff1Sjsg guc_capture_init_node(guc, found);
9811bb76ff1Sjsg
9821bb76ff1Sjsg return found;
9831bb76ff1Sjsg }
9841bb76ff1Sjsg
9851bb76ff1Sjsg static struct __guc_capture_parsed_output *
guc_capture_alloc_one_node(struct intel_guc * guc)9861bb76ff1Sjsg guc_capture_alloc_one_node(struct intel_guc *guc)
9871bb76ff1Sjsg {
9881bb76ff1Sjsg struct __guc_capture_parsed_output *new;
9891bb76ff1Sjsg int i;
9901bb76ff1Sjsg
9911bb76ff1Sjsg new = kzalloc(sizeof(*new), GFP_KERNEL);
9921bb76ff1Sjsg if (!new)
9931bb76ff1Sjsg return NULL;
9941bb76ff1Sjsg
9951bb76ff1Sjsg for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
9961bb76ff1Sjsg new->reginfo[i].regs = kcalloc(guc->capture->max_mmio_per_node,
9971bb76ff1Sjsg sizeof(struct guc_mmio_reg), GFP_KERNEL);
9981bb76ff1Sjsg if (!new->reginfo[i].regs) {
9991bb76ff1Sjsg while (i)
10001bb76ff1Sjsg kfree(new->reginfo[--i].regs);
10011bb76ff1Sjsg kfree(new);
10021bb76ff1Sjsg return NULL;
10031bb76ff1Sjsg }
10041bb76ff1Sjsg }
10051bb76ff1Sjsg guc_capture_init_node(guc, new);
10061bb76ff1Sjsg
10071bb76ff1Sjsg return new;
10081bb76ff1Sjsg }
10091bb76ff1Sjsg
10101bb76ff1Sjsg static struct __guc_capture_parsed_output *
guc_capture_clone_node(struct intel_guc * guc,struct __guc_capture_parsed_output * original,u32 keep_reglist_mask)10111bb76ff1Sjsg guc_capture_clone_node(struct intel_guc *guc, struct __guc_capture_parsed_output *original,
10121bb76ff1Sjsg u32 keep_reglist_mask)
10131bb76ff1Sjsg {
10141bb76ff1Sjsg struct __guc_capture_parsed_output *new;
10151bb76ff1Sjsg int i;
10161bb76ff1Sjsg
10171bb76ff1Sjsg new = guc_capture_get_prealloc_node(guc);
10181bb76ff1Sjsg if (!new)
10191bb76ff1Sjsg return NULL;
10201bb76ff1Sjsg if (!original)
10211bb76ff1Sjsg return new;
10221bb76ff1Sjsg
10231bb76ff1Sjsg new->is_partial = original->is_partial;
10241bb76ff1Sjsg
10251bb76ff1Sjsg /* copy reg-lists that we want to clone */
10261bb76ff1Sjsg for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
10271bb76ff1Sjsg if (keep_reglist_mask & BIT(i)) {
10281bb76ff1Sjsg GEM_BUG_ON(original->reginfo[i].num_regs >
10291bb76ff1Sjsg guc->capture->max_mmio_per_node);
10301bb76ff1Sjsg
10311bb76ff1Sjsg memcpy(new->reginfo[i].regs, original->reginfo[i].regs,
10321bb76ff1Sjsg original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg));
10331bb76ff1Sjsg
10341bb76ff1Sjsg new->reginfo[i].num_regs = original->reginfo[i].num_regs;
10351bb76ff1Sjsg new->reginfo[i].vfid = original->reginfo[i].vfid;
10361bb76ff1Sjsg
10371bb76ff1Sjsg if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) {
10381bb76ff1Sjsg new->eng_class = original->eng_class;
10391bb76ff1Sjsg } else if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
10401bb76ff1Sjsg new->eng_inst = original->eng_inst;
10411bb76ff1Sjsg new->guc_id = original->guc_id;
10421bb76ff1Sjsg new->lrca = original->lrca;
10431bb76ff1Sjsg }
10441bb76ff1Sjsg }
10451bb76ff1Sjsg }
10461bb76ff1Sjsg
10471bb76ff1Sjsg return new;
10481bb76ff1Sjsg }
10491bb76ff1Sjsg
10501bb76ff1Sjsg static void
__guc_capture_create_prealloc_nodes(struct intel_guc * guc)10511bb76ff1Sjsg __guc_capture_create_prealloc_nodes(struct intel_guc *guc)
10521bb76ff1Sjsg {
10531bb76ff1Sjsg struct __guc_capture_parsed_output *node = NULL;
10541bb76ff1Sjsg int i;
10551bb76ff1Sjsg
10561bb76ff1Sjsg for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
10571bb76ff1Sjsg node = guc_capture_alloc_one_node(guc);
10581bb76ff1Sjsg if (!node) {
1059*f005ef32Sjsg guc_warn(guc, "Register capture pre-alloc-cache failure\n");
10601bb76ff1Sjsg /* dont free the priors, use what we got and cleanup at shutdown */
10611bb76ff1Sjsg return;
10621bb76ff1Sjsg }
10631bb76ff1Sjsg guc_capture_add_node_to_cachelist(guc->capture, node);
10641bb76ff1Sjsg }
10651bb76ff1Sjsg }
10661bb76ff1Sjsg
10671bb76ff1Sjsg static int
guc_get_max_reglist_count(struct intel_guc * guc)10681bb76ff1Sjsg guc_get_max_reglist_count(struct intel_guc *guc)
10691bb76ff1Sjsg {
10701bb76ff1Sjsg int i, j, k, tmp, maxregcount = 0;
10711bb76ff1Sjsg
10721bb76ff1Sjsg for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
10731bb76ff1Sjsg for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
10741bb76ff1Sjsg for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
10751bb76ff1Sjsg if (j == GUC_CAPTURE_LIST_TYPE_GLOBAL && k > 0)
10761bb76ff1Sjsg continue;
10771bb76ff1Sjsg
10781bb76ff1Sjsg tmp = guc_cap_list_num_regs(guc->capture, i, j, k);
10791bb76ff1Sjsg if (tmp > maxregcount)
10801bb76ff1Sjsg maxregcount = tmp;
10811bb76ff1Sjsg }
10821bb76ff1Sjsg }
10831bb76ff1Sjsg }
10841bb76ff1Sjsg if (!maxregcount)
10851bb76ff1Sjsg maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;
10861bb76ff1Sjsg
10871bb76ff1Sjsg return maxregcount;
10881bb76ff1Sjsg }
10891bb76ff1Sjsg
10901bb76ff1Sjsg static void
guc_capture_create_prealloc_nodes(struct intel_guc * guc)10911bb76ff1Sjsg guc_capture_create_prealloc_nodes(struct intel_guc *guc)
10921bb76ff1Sjsg {
10931bb76ff1Sjsg /* skip if we've already done the pre-alloc */
10941bb76ff1Sjsg if (guc->capture->max_mmio_per_node)
10951bb76ff1Sjsg return;
10961bb76ff1Sjsg
10971bb76ff1Sjsg guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
10981bb76ff1Sjsg __guc_capture_create_prealloc_nodes(guc);
10991bb76ff1Sjsg }
11001bb76ff1Sjsg
11011bb76ff1Sjsg static int
guc_capture_extract_reglists(struct intel_guc * guc,struct __guc_capture_bufstate * buf)11021bb76ff1Sjsg guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
11031bb76ff1Sjsg {
11041bb76ff1Sjsg struct guc_state_capture_group_header_t ghdr = {0};
11051bb76ff1Sjsg struct guc_state_capture_header_t hdr = {0};
11061bb76ff1Sjsg struct __guc_capture_parsed_output *node = NULL;
11071bb76ff1Sjsg struct guc_mmio_reg *regs = NULL;
11081bb76ff1Sjsg int i, numlists, numregs, ret = 0;
11091bb76ff1Sjsg enum guc_capture_type datatype;
11101bb76ff1Sjsg struct guc_mmio_reg tmp;
11111bb76ff1Sjsg bool is_partial = false;
11121bb76ff1Sjsg
11131bb76ff1Sjsg i = guc_capture_buf_cnt(buf);
11141bb76ff1Sjsg if (!i)
11151bb76ff1Sjsg return -ENODATA;
11161bb76ff1Sjsg if (i % sizeof(u32)) {
1117*f005ef32Sjsg guc_warn(guc, "Got mis-aligned register capture entries\n");
11181bb76ff1Sjsg ret = -EIO;
11191bb76ff1Sjsg goto bailout;
11201bb76ff1Sjsg }
11211bb76ff1Sjsg
11221bb76ff1Sjsg /* first get the capture group header */
11231bb76ff1Sjsg if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) {
11241bb76ff1Sjsg ret = -EIO;
11251bb76ff1Sjsg goto bailout;
11261bb76ff1Sjsg }
11271bb76ff1Sjsg /*
11281bb76ff1Sjsg * we would typically expect a layout as below where n would be expected to be
11291bb76ff1Sjsg * anywhere between 3 to n where n > 3 if we are seeing multiple dependent engine
11301bb76ff1Sjsg * instances being reset together.
11311bb76ff1Sjsg * ____________________________________________
11321bb76ff1Sjsg * | Capture Group |
11331bb76ff1Sjsg * | ________________________________________ |
11341bb76ff1Sjsg * | | Capture Group Header: | |
11351bb76ff1Sjsg * | | - num_captures = 5 | |
11361bb76ff1Sjsg * | |______________________________________| |
11371bb76ff1Sjsg * | ________________________________________ |
11381bb76ff1Sjsg * | | Capture1: | |
11391bb76ff1Sjsg * | | Hdr: GLOBAL, numregs=a | |
11401bb76ff1Sjsg * | | ____________________________________ | |
11411bb76ff1Sjsg * | | | Reglist | | |
11421bb76ff1Sjsg * | | | - reg1, reg2, ... rega | | |
11431bb76ff1Sjsg * | | |__________________________________| | |
11441bb76ff1Sjsg * | |______________________________________| |
11451bb76ff1Sjsg * | ________________________________________ |
11461bb76ff1Sjsg * | | Capture2: | |
11471bb76ff1Sjsg * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| |
11481bb76ff1Sjsg * | | ____________________________________ | |
11491bb76ff1Sjsg * | | | Reglist | | |
11501bb76ff1Sjsg * | | | - reg1, reg2, ... regb | | |
11511bb76ff1Sjsg * | | |__________________________________| | |
11521bb76ff1Sjsg * | |______________________________________| |
11531bb76ff1Sjsg * | ________________________________________ |
11541bb76ff1Sjsg * | | Capture3: | |
11551bb76ff1Sjsg * | | Hdr: INSTANCE=RCS, numregs=c | |
11561bb76ff1Sjsg * | | ____________________________________ | |
11571bb76ff1Sjsg * | | | Reglist | | |
11581bb76ff1Sjsg * | | | - reg1, reg2, ... regc | | |
11591bb76ff1Sjsg * | | |__________________________________| | |
11601bb76ff1Sjsg * | |______________________________________| |
11611bb76ff1Sjsg * | ________________________________________ |
11621bb76ff1Sjsg * | | Capture4: | |
11631bb76ff1Sjsg * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| |
11641bb76ff1Sjsg * | | ____________________________________ | |
11651bb76ff1Sjsg * | | | Reglist | | |
11661bb76ff1Sjsg * | | | - reg1, reg2, ... regd | | |
11671bb76ff1Sjsg * | | |__________________________________| | |
11681bb76ff1Sjsg * | |______________________________________| |
11691bb76ff1Sjsg * | ________________________________________ |
11701bb76ff1Sjsg * | | Capture5: | |
11711bb76ff1Sjsg * | | Hdr: INSTANCE=CCS0, numregs=e | |
11721bb76ff1Sjsg * | | ____________________________________ | |
11731bb76ff1Sjsg * | | | Reglist | | |
11741bb76ff1Sjsg * | | | - reg1, reg2, ... rege | | |
11751bb76ff1Sjsg * | | |__________________________________| | |
11761bb76ff1Sjsg * | |______________________________________| |
11771bb76ff1Sjsg * |__________________________________________|
11781bb76ff1Sjsg */
11791bb76ff1Sjsg is_partial = FIELD_GET(CAP_GRP_HDR_CAPTURE_TYPE, ghdr.info);
11801bb76ff1Sjsg numlists = FIELD_GET(CAP_GRP_HDR_NUM_CAPTURES, ghdr.info);
11811bb76ff1Sjsg
11821bb76ff1Sjsg while (numlists--) {
11831bb76ff1Sjsg if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) {
11841bb76ff1Sjsg ret = -EIO;
11851bb76ff1Sjsg break;
11861bb76ff1Sjsg }
11871bb76ff1Sjsg
11881bb76ff1Sjsg datatype = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info);
11891bb76ff1Sjsg if (datatype > GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
11901bb76ff1Sjsg /* unknown capture type - skip over to next capture set */
11911bb76ff1Sjsg numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
11921bb76ff1Sjsg while (numregs--) {
11931bb76ff1Sjsg if (guc_capture_log_get_register(guc, buf, &tmp)) {
11941bb76ff1Sjsg ret = -EIO;
11951bb76ff1Sjsg break;
11961bb76ff1Sjsg }
11971bb76ff1Sjsg }
11981bb76ff1Sjsg continue;
11991bb76ff1Sjsg } else if (node) {
12001bb76ff1Sjsg /*
12011bb76ff1Sjsg * Based on the current capture type and what we have so far,
12021bb76ff1Sjsg * decide if we should add the current node into the internal
12031bb76ff1Sjsg * linked list for match-up when i915_gpu_coredump calls later
12041bb76ff1Sjsg * (and alloc a blank node for the next set of reglists)
12051bb76ff1Sjsg * or continue with the same node or clone the current node
12061bb76ff1Sjsg * but only retain the global or class registers (such as the
12071bb76ff1Sjsg * case of dependent engine resets).
12081bb76ff1Sjsg */
12091bb76ff1Sjsg if (datatype == GUC_CAPTURE_LIST_TYPE_GLOBAL) {
12101bb76ff1Sjsg guc_capture_add_node_to_outlist(guc->capture, node);
12111bb76ff1Sjsg node = NULL;
12121bb76ff1Sjsg } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS &&
12131bb76ff1Sjsg node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) {
12141bb76ff1Sjsg /* Add to list, clone node and duplicate global list */
12151bb76ff1Sjsg guc_capture_add_node_to_outlist(guc->capture, node);
12161bb76ff1Sjsg node = guc_capture_clone_node(guc, node,
12171bb76ff1Sjsg GCAP_PARSED_REGLIST_INDEX_GLOBAL);
12181bb76ff1Sjsg } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE &&
12191bb76ff1Sjsg node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) {
12201bb76ff1Sjsg /* Add to list, clone node and duplicate global + class lists */
12211bb76ff1Sjsg guc_capture_add_node_to_outlist(guc->capture, node);
12221bb76ff1Sjsg node = guc_capture_clone_node(guc, node,
12231bb76ff1Sjsg (GCAP_PARSED_REGLIST_INDEX_GLOBAL |
12241bb76ff1Sjsg GCAP_PARSED_REGLIST_INDEX_ENGCLASS));
12251bb76ff1Sjsg }
12261bb76ff1Sjsg }
12271bb76ff1Sjsg
12281bb76ff1Sjsg if (!node) {
12291bb76ff1Sjsg node = guc_capture_get_prealloc_node(guc);
12301bb76ff1Sjsg if (!node) {
12311bb76ff1Sjsg ret = -ENOMEM;
12321bb76ff1Sjsg break;
12331bb76ff1Sjsg }
12341bb76ff1Sjsg if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
1235*f005ef32Sjsg guc_dbg(guc, "Register capture missing global dump: %08x!\n",
12361bb76ff1Sjsg datatype);
12371bb76ff1Sjsg }
12381bb76ff1Sjsg node->is_partial = is_partial;
12391bb76ff1Sjsg node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner);
12401bb76ff1Sjsg switch (datatype) {
12411bb76ff1Sjsg case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
12421bb76ff1Sjsg node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
12431bb76ff1Sjsg node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info);
12441bb76ff1Sjsg node->lrca = hdr.lrca;
12451bb76ff1Sjsg node->guc_id = hdr.guc_id;
12461bb76ff1Sjsg break;
12471bb76ff1Sjsg case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
12481bb76ff1Sjsg node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
12491bb76ff1Sjsg break;
12501bb76ff1Sjsg default:
12511bb76ff1Sjsg break;
12521bb76ff1Sjsg }
12531bb76ff1Sjsg
12541bb76ff1Sjsg numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
12551bb76ff1Sjsg if (numregs > guc->capture->max_mmio_per_node) {
1256*f005ef32Sjsg guc_dbg(guc, "Register capture list extraction clipped by prealloc!\n");
12571bb76ff1Sjsg numregs = guc->capture->max_mmio_per_node;
12581bb76ff1Sjsg }
12591bb76ff1Sjsg node->reginfo[datatype].num_regs = numregs;
12601bb76ff1Sjsg regs = node->reginfo[datatype].regs;
12611bb76ff1Sjsg i = 0;
12621bb76ff1Sjsg while (numregs--) {
12631bb76ff1Sjsg if (guc_capture_log_get_register(guc, buf, ®s[i++])) {
12641bb76ff1Sjsg ret = -EIO;
12651bb76ff1Sjsg break;
12661bb76ff1Sjsg }
12671bb76ff1Sjsg }
12681bb76ff1Sjsg }
12691bb76ff1Sjsg
12701bb76ff1Sjsg bailout:
12711bb76ff1Sjsg if (node) {
12721bb76ff1Sjsg /* If we have data, add to linked list for match-up when i915_gpu_coredump calls */
12731bb76ff1Sjsg for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
12741bb76ff1Sjsg if (node->reginfo[i].regs) {
12751bb76ff1Sjsg guc_capture_add_node_to_outlist(guc->capture, node);
12761bb76ff1Sjsg node = NULL;
12771bb76ff1Sjsg break;
12781bb76ff1Sjsg }
12791bb76ff1Sjsg }
12801bb76ff1Sjsg if (node) /* else return it back to cache list */
12811bb76ff1Sjsg guc_capture_add_node_to_cachelist(guc->capture, node);
12821bb76ff1Sjsg }
12831bb76ff1Sjsg return ret;
12841bb76ff1Sjsg }
12851bb76ff1Sjsg
__guc_capture_flushlog_complete(struct intel_guc * guc)12861bb76ff1Sjsg static int __guc_capture_flushlog_complete(struct intel_guc *guc)
12871bb76ff1Sjsg {
12881bb76ff1Sjsg u32 action[] = {
12891bb76ff1Sjsg INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
12901bb76ff1Sjsg GUC_CAPTURE_LOG_BUFFER
12911bb76ff1Sjsg };
12921bb76ff1Sjsg
12931bb76ff1Sjsg return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
12941bb76ff1Sjsg
12951bb76ff1Sjsg }
12961bb76ff1Sjsg
__guc_capture_process_output(struct intel_guc * guc)12971bb76ff1Sjsg static void __guc_capture_process_output(struct intel_guc *guc)
12981bb76ff1Sjsg {
12991bb76ff1Sjsg unsigned int buffer_size, read_offset, write_offset, full_count;
13001bb76ff1Sjsg struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
13011bb76ff1Sjsg struct guc_log_buffer_state log_buf_state_local;
13021bb76ff1Sjsg struct guc_log_buffer_state *log_buf_state;
13031bb76ff1Sjsg struct __guc_capture_bufstate buf;
13041bb76ff1Sjsg void *src_data = NULL;
13051bb76ff1Sjsg bool new_overflow;
13061bb76ff1Sjsg int ret;
13071bb76ff1Sjsg
13081bb76ff1Sjsg log_buf_state = guc->log.buf_addr +
13091bb76ff1Sjsg (sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
13101bb76ff1Sjsg src_data = guc->log.buf_addr +
13111bb76ff1Sjsg intel_guc_get_log_buffer_offset(&guc->log, GUC_CAPTURE_LOG_BUFFER);
13121bb76ff1Sjsg
13131bb76ff1Sjsg /*
13141bb76ff1Sjsg * Make a copy of the state structure, inside GuC log buffer
13151bb76ff1Sjsg * (which is uncached mapped), on the stack to avoid reading
13161bb76ff1Sjsg * from it multiple times.
13171bb76ff1Sjsg */
13181bb76ff1Sjsg memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
13191bb76ff1Sjsg buffer_size = intel_guc_get_log_buffer_size(&guc->log, GUC_CAPTURE_LOG_BUFFER);
13201bb76ff1Sjsg read_offset = log_buf_state_local.read_ptr;
13211bb76ff1Sjsg write_offset = log_buf_state_local.sampled_write_ptr;
13221bb76ff1Sjsg full_count = log_buf_state_local.buffer_full_cnt;
13231bb76ff1Sjsg
13241bb76ff1Sjsg /* Bookkeeping stuff */
13251bb76ff1Sjsg guc->log.stats[GUC_CAPTURE_LOG_BUFFER].flush += log_buf_state_local.flush_to_file;
13261bb76ff1Sjsg new_overflow = intel_guc_check_log_buf_overflow(&guc->log, GUC_CAPTURE_LOG_BUFFER,
13271bb76ff1Sjsg full_count);
13281bb76ff1Sjsg
13291bb76ff1Sjsg /* Now copy the actual logs. */
13301bb76ff1Sjsg if (unlikely(new_overflow)) {
13311bb76ff1Sjsg /* copy the whole buffer in case of overflow */
13321bb76ff1Sjsg read_offset = 0;
13331bb76ff1Sjsg write_offset = buffer_size;
13341bb76ff1Sjsg } else if (unlikely((read_offset > buffer_size) ||
13351bb76ff1Sjsg (write_offset > buffer_size))) {
1336*f005ef32Sjsg guc_err(guc, "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
1337*f005ef32Sjsg read_offset, buffer_size);
13381bb76ff1Sjsg /* copy whole buffer as offsets are unreliable */
13391bb76ff1Sjsg read_offset = 0;
13401bb76ff1Sjsg write_offset = buffer_size;
13411bb76ff1Sjsg }
13421bb76ff1Sjsg
13431bb76ff1Sjsg buf.size = buffer_size;
13441bb76ff1Sjsg buf.rd = read_offset;
13451bb76ff1Sjsg buf.wr = write_offset;
13461bb76ff1Sjsg buf.data = src_data;
13471bb76ff1Sjsg
13481bb76ff1Sjsg if (!uc->reset_in_progress) {
13491bb76ff1Sjsg do {
13501bb76ff1Sjsg ret = guc_capture_extract_reglists(guc, &buf);
13511bb76ff1Sjsg } while (ret >= 0);
13521bb76ff1Sjsg }
13531bb76ff1Sjsg
13541bb76ff1Sjsg /* Update the state of log buffer err-cap state */
13551bb76ff1Sjsg log_buf_state->read_ptr = write_offset;
13561bb76ff1Sjsg log_buf_state->flush_to_file = 0;
13571bb76ff1Sjsg __guc_capture_flushlog_complete(guc);
13581bb76ff1Sjsg }
13591bb76ff1Sjsg
13601bb76ff1Sjsg #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
13611bb76ff1Sjsg
13621bb76ff1Sjsg static const char *
guc_capture_reg_to_str(const struct intel_guc * guc,u32 owner,u32 type,u32 class,u32 id,u32 offset,u32 * is_ext)13631bb76ff1Sjsg guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
13641bb76ff1Sjsg u32 class, u32 id, u32 offset, u32 *is_ext)
13651bb76ff1Sjsg {
13661bb76ff1Sjsg const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
13671bb76ff1Sjsg struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
13681bb76ff1Sjsg const struct __guc_mmio_reg_descr_group *match;
13691bb76ff1Sjsg struct __guc_mmio_reg_descr_group *matchext;
13701bb76ff1Sjsg int j;
13711bb76ff1Sjsg
13721bb76ff1Sjsg *is_ext = 0;
13731bb76ff1Sjsg if (!reglists)
13741bb76ff1Sjsg return NULL;
13751bb76ff1Sjsg
13761bb76ff1Sjsg match = guc_capture_get_one_list(reglists, owner, type, id);
13771bb76ff1Sjsg if (!match)
13781bb76ff1Sjsg return NULL;
13791bb76ff1Sjsg
13801bb76ff1Sjsg for (j = 0; j < match->num_regs; ++j) {
13811bb76ff1Sjsg if (offset == match->list[j].reg.reg)
13821bb76ff1Sjsg return match->list[j].regname;
13831bb76ff1Sjsg }
13841bb76ff1Sjsg if (extlists) {
13851bb76ff1Sjsg matchext = guc_capture_get_one_ext_list(extlists, owner, type, id);
13861bb76ff1Sjsg if (!matchext)
13871bb76ff1Sjsg return NULL;
13881bb76ff1Sjsg for (j = 0; j < matchext->num_regs; ++j) {
13891bb76ff1Sjsg if (offset == matchext->extlist[j].reg.reg) {
13901bb76ff1Sjsg *is_ext = 1;
13911bb76ff1Sjsg return matchext->extlist[j].regname;
13921bb76ff1Sjsg }
13931bb76ff1Sjsg }
13941bb76ff1Sjsg }
13951bb76ff1Sjsg
13961bb76ff1Sjsg return NULL;
13971bb76ff1Sjsg }
13981bb76ff1Sjsg
/*
 * GCAP_PRINT_INTEL_ENG_INFO - print the i915-side identity of an engine
 * (name, class, instance, logical mask) into the error-state buffer.
 */
#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
	do { \
		i915_error_printf(ebuf, "    i915-Eng-Name: %s command stream\n", \
				  (eng)->name); \
		i915_error_printf(ebuf, "    i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
		i915_error_printf(ebuf, "    i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
		i915_error_printf(ebuf, "    i915-Eng-LogicalMask: 0x%08x\n", \
				  (eng)->logical_mask); \
	} while (0)

/*
 * GCAP_PRINT_GUC_INST_INFO - print the GuC-side identity of a capture node
 * (engine instance id, GuC context id, context LRCA) into the error-state
 * buffer.
 */
#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
	do { \
		i915_error_printf(ebuf, "    GuC-Engine-Inst-Id: 0x%08x\n", \
				  (node)->eng_inst); \
		i915_error_printf(ebuf, "    GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
		i915_error_printf(ebuf, "    LRCA: 0x%08x\n", (node)->lrca); \
	} while (0)
14161bb76ff1Sjsg
/*
 * intel_guc_capture_print_engine_node - dump one engine's GuC error-capture
 * node into the error-state buffer.
 *
 * Prints, for each capture list type (global, engine-class, engine-instance),
 * the owning VF id, the identification info relevant to that type, and every
 * captured register as "name: value" (or "REG-0xoffset: value" when the
 * offset is not in the known register lists). Extended-list registers also
 * get their steering group/instance appended.
 *
 * Returns 0 on success, -EINVAL on NULL arguments, -ENODEV when the
 * coredump carries no GuC capture state or engine.
 */
int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
					const struct intel_engine_coredump *ee)
{
	/* Indexed by node->is_partial (0 = full, 1 = partial). */
	const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
		"full-capture",
		"partial-capture"
	};
	/* Indexed by the GUC_CAPTURE_LIST_TYPE_* enum. */
	const char *datatype[GUC_CAPTURE_LIST_TYPE_MAX] = {
		"Global",
		"Engine-Class",
		"Engine-Instance"
	};
	struct intel_guc_state_capture *cap;
	struct __guc_capture_parsed_output *node;
	struct intel_engine_cs *eng;
	struct guc_mmio_reg *regs;
	struct intel_guc *guc;
	const char *str;
	int numregs, i, j;
	u32 is_ext;

	if (!ebuf || !ee)
		return -EINVAL;
	cap = ee->guc_capture;
	if (!cap || !ee->engine)
		return -ENODEV;

	guc = &ee->engine->gt->uc.guc;

	i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
			  ee->engine->name);

	node = ee->guc_capture_node;
	if (!node) {
		/* No capture node was matched to this engine coredump. */
		i915_error_printf(ebuf, "  No matching ee-node\n");
		return 0;
	}

	i915_error_printf(ebuf, "Coverage:  %s\n", grptype[node->is_partial]);

	/* Walk all three list types: global, engine-class, engine-instance. */
	for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
		i915_error_printf(ebuf, "  RegListType: %s\n",
				  datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
		i915_error_printf(ebuf, "    Owner-Id: %d\n", node->reginfo[i].vfid);

		switch (i) {
		case GUC_CAPTURE_LIST_TYPE_GLOBAL:
		default:
			/* Global lists carry no per-engine identification. */
			break;
		case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
			i915_error_printf(ebuf, "    GuC-Eng-Class: %d\n", node->eng_class);
			i915_error_printf(ebuf, "    i915-Eng-Class: %d\n",
					  guc_class_to_engine_class(node->eng_class));
			break;
		case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
			/* Cross-reference the GuC ids back to an i915 engine. */
			eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
			if (eng)
				GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
			else
				i915_error_printf(ebuf, "    i915-Eng-Lookup Fail!\n");
			GCAP_PRINT_GUC_INST_INFO(ebuf, node);
			break;
		}

		numregs = node->reginfo[i].num_regs;
		i915_error_printf(ebuf, "    NumRegs: %d\n", numregs);
		j = 0;
		while (numregs--) {
			regs = node->reginfo[i].regs;
			str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
						     node->eng_class, 0, regs[j].offset, &is_ext);
			if (!str)
				i915_error_printf(ebuf, "      REG-0x%08x", regs[j].offset);
			else
				i915_error_printf(ebuf, "      %s", str);
			if (is_ext)
				/* Extended regs also report their MCR steering. */
				i915_error_printf(ebuf, "[%ld][%ld]",
					FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
					FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
			i915_error_printf(ebuf, ":  0x%08x\n", regs[j].value);
			++j;
		}
	}
	return 0;
}
15021bb76ff1Sjsg
15031bb76ff1Sjsg #endif //CONFIG_DRM_I915_CAPTURE_ERROR
15041bb76ff1Sjsg
guc_capture_find_ecode(struct intel_engine_coredump * ee)1505fffa8a6aSjsg static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
1506fffa8a6aSjsg {
1507fffa8a6aSjsg struct gcap_reg_list_info *reginfo;
1508fffa8a6aSjsg struct guc_mmio_reg *regs;
1509fffa8a6aSjsg i915_reg_t reg_ipehr = RING_IPEHR(0);
1510fffa8a6aSjsg i915_reg_t reg_instdone = RING_INSTDONE(0);
1511fffa8a6aSjsg int i;
1512fffa8a6aSjsg
1513fffa8a6aSjsg if (!ee->guc_capture_node)
1514fffa8a6aSjsg return;
1515fffa8a6aSjsg
1516fffa8a6aSjsg reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
1517fffa8a6aSjsg regs = reginfo->regs;
1518fffa8a6aSjsg for (i = 0; i < reginfo->num_regs; i++) {
1519fffa8a6aSjsg if (regs[i].offset == reg_ipehr.reg)
1520fffa8a6aSjsg ee->ipehr = regs[i].value;
1521fffa8a6aSjsg else if (regs[i].offset == reg_instdone.reg)
1522fffa8a6aSjsg ee->instdone.instdone = regs[i].value;
1523fffa8a6aSjsg }
1524fffa8a6aSjsg }
1525fffa8a6aSjsg
intel_guc_capture_free_node(struct intel_engine_coredump * ee)15261bb76ff1Sjsg void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
15271bb76ff1Sjsg {
15281bb76ff1Sjsg if (!ee || !ee->guc_capture_node)
15291bb76ff1Sjsg return;
15301bb76ff1Sjsg
153141b6a0afSjsg guc_capture_add_node_to_cachelist(ee->guc_capture, ee->guc_capture_node);
153241b6a0afSjsg ee->guc_capture = NULL;
15331bb76ff1Sjsg ee->guc_capture_node = NULL;
15341bb76ff1Sjsg }
15351bb76ff1Sjsg
intel_guc_capture_is_matching_engine(struct intel_gt * gt,struct intel_context * ce,struct intel_engine_cs * engine)1536*f005ef32Sjsg bool intel_guc_capture_is_matching_engine(struct intel_gt *gt,
1537*f005ef32Sjsg struct intel_context *ce,
1538*f005ef32Sjsg struct intel_engine_cs *engine)
1539*f005ef32Sjsg {
1540*f005ef32Sjsg struct __guc_capture_parsed_output *n;
1541*f005ef32Sjsg struct intel_guc *guc;
1542*f005ef32Sjsg
1543*f005ef32Sjsg if (!gt || !ce || !engine)
1544*f005ef32Sjsg return false;
1545*f005ef32Sjsg
1546*f005ef32Sjsg guc = >->uc.guc;
1547*f005ef32Sjsg if (!guc->capture)
1548*f005ef32Sjsg return false;
1549*f005ef32Sjsg
1550*f005ef32Sjsg /*
1551*f005ef32Sjsg * Look for a matching GuC reported error capture node from
1552*f005ef32Sjsg * the internal output link-list based on lrca, guc-id and engine
1553*f005ef32Sjsg * identification.
1554*f005ef32Sjsg */
1555*f005ef32Sjsg list_for_each_entry(n, &guc->capture->outlist, link) {
1556*f005ef32Sjsg if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(engine->guc_id) &&
1557*f005ef32Sjsg n->eng_class == GUC_ID_TO_ENGINE_CLASS(engine->guc_id) &&
1558*f005ef32Sjsg n->guc_id == ce->guc_id.id &&
1559*f005ef32Sjsg (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
1560*f005ef32Sjsg return true;
1561*f005ef32Sjsg }
1562*f005ef32Sjsg
1563*f005ef32Sjsg return false;
1564*f005ef32Sjsg }
1565*f005ef32Sjsg
/*
 * intel_guc_capture_get_matching_node - claim the GuC capture node matching
 * an engine coredump.
 *
 * Finds the first node on the capture outlist whose engine instance/class,
 * GuC context id, and LRCA (GTT-address bits) match @ee's engine and @ce,
 * unlinks it from the outlist, and attaches it (plus the capture state) to
 * the coredump. Also extracts IPEHR/INSTDONE from the claimed node. Warns
 * when no node matches.
 */
void intel_guc_capture_get_matching_node(struct intel_gt *gt,
					 struct intel_engine_coredump *ee,
					 struct intel_context *ce)
{
	struct __guc_capture_parsed_output *n, *ntmp;
	struct intel_guc *guc;

	if (!gt || !ee || !ce)
		return;

	guc = &gt->uc.guc;
	if (!guc->capture)
		return;

	/* The coredump must not already own a capture node. */
	GEM_BUG_ON(ee->guc_capture_node);

	/*
	 * Look for a matching GuC reported error capture node from
	 * the internal output link-list based on lrca, guc-id and engine
	 * identification. The _safe iterator is required because the
	 * matched node is unlinked from the list inside the loop.
	 */
	list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
		if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
		    n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
		    n->guc_id == ce->guc_id.id &&
		    (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
			list_del(&n->link);
			ee->guc_capture_node = n;
			ee->guc_capture = guc->capture;
			guc_capture_find_ecode(ee);
			return;
		}
	}

	guc_warn(guc, "No register capture node found for 0x%04X / 0x%08X\n",
		 ce->guc_id.id, ce->lrc.lrca);
}
16031bb76ff1Sjsg
intel_guc_capture_process(struct intel_guc * guc)16041bb76ff1Sjsg void intel_guc_capture_process(struct intel_guc *guc)
16051bb76ff1Sjsg {
16061bb76ff1Sjsg if (guc->capture)
16071bb76ff1Sjsg __guc_capture_process_output(guc);
16081bb76ff1Sjsg }
16091bb76ff1Sjsg
16101bb76ff1Sjsg static void
guc_capture_free_ads_cache(struct intel_guc_state_capture * gc)16111bb76ff1Sjsg guc_capture_free_ads_cache(struct intel_guc_state_capture *gc)
16121bb76ff1Sjsg {
16131bb76ff1Sjsg int i, j, k;
16141bb76ff1Sjsg struct __guc_capture_ads_cache *cache;
16151bb76ff1Sjsg
16161bb76ff1Sjsg for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
16171bb76ff1Sjsg for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
16181bb76ff1Sjsg for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
16191bb76ff1Sjsg cache = &gc->ads_cache[i][j][k];
16201bb76ff1Sjsg if (cache->is_valid)
16211bb76ff1Sjsg kfree(cache->ptr);
16221bb76ff1Sjsg }
16231bb76ff1Sjsg }
16241bb76ff1Sjsg }
16251bb76ff1Sjsg kfree(gc->ads_null_cache);
16261bb76ff1Sjsg }
16271bb76ff1Sjsg
intel_guc_capture_destroy(struct intel_guc * guc)16281bb76ff1Sjsg void intel_guc_capture_destroy(struct intel_guc *guc)
16291bb76ff1Sjsg {
16301bb76ff1Sjsg if (!guc->capture)
16311bb76ff1Sjsg return;
16321bb76ff1Sjsg
16331bb76ff1Sjsg guc_capture_free_ads_cache(guc->capture);
16341bb76ff1Sjsg
16351bb76ff1Sjsg guc_capture_delete_prealloc_nodes(guc);
16361bb76ff1Sjsg
16371bb76ff1Sjsg guc_capture_free_extlists(guc->capture->extlists);
16381bb76ff1Sjsg kfree(guc->capture->extlists);
16391bb76ff1Sjsg
16401bb76ff1Sjsg kfree(guc->capture);
16411bb76ff1Sjsg guc->capture = NULL;
16421bb76ff1Sjsg }
16431bb76ff1Sjsg
intel_guc_capture_init(struct intel_guc * guc)16441bb76ff1Sjsg int intel_guc_capture_init(struct intel_guc *guc)
16451bb76ff1Sjsg {
16461bb76ff1Sjsg guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL);
16471bb76ff1Sjsg if (!guc->capture)
16481bb76ff1Sjsg return -ENOMEM;
16491bb76ff1Sjsg
16501bb76ff1Sjsg guc->capture->reglists = guc_capture_get_device_reglist(guc);
16511bb76ff1Sjsg
16521bb76ff1Sjsg INIT_LIST_HEAD(&guc->capture->outlist);
16531bb76ff1Sjsg INIT_LIST_HEAD(&guc->capture->cachelist);
16541bb76ff1Sjsg
16551bb76ff1Sjsg check_guc_capture_size(guc);
16561bb76ff1Sjsg
16571bb76ff1Sjsg return 0;
16581bb76ff1Sjsg }
1659