// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
 */

#include <linux/irq.h>
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"

#define GSC_BAR_LENGTH  0x00000FFC

#ifdef notyet
static void gsc_irq_mask(struct irq_data *d)
{
	/* generic irq handling */
}

static void gsc_irq_unmask(struct irq_data *d)
{
	/* generic irq handling */
}

static struct irq_chip gsc_irq_chip = {
	.name = "gsc_irq_chip",
	.irq_mask = gsc_irq_mask,
	.irq_unmask = gsc_irq_unmask,
};
#endif

static int gsc_irq_init(int irq)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
				      handle_simple_irq, "gsc_irq_handler");

	return irq_set_chip_data(irq, NULL);
#endif
}

static int
gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_lmem(gt->i915, size,
					  I915_BO_ALLOC_CONTIGUOUS |
					  I915_BO_ALLOC_CPU_CLEAR);
	if (IS_ERR(obj)) {
		drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
		return PTR_ERR(obj);
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
		goto out_put;
	}

	intf->gem_obj = obj;

	return 0;

out_put:
	i915_gem_object_put(obj);
	return err;
}

static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
{
	struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);

	if (!obj)
		return;

	if (i915_gem_object_has_pinned_pages(obj))
		i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);
}

struct gsc_def {
	const char *name;
	unsigned long bar;
	size_t bar_size;
	bool use_polling;
	bool slow_firmware;
	size_t lmem_size;
};

/* gsc resources and definitions (HECI1 and HECI2) */
static const struct gsc_def gsc_def_dg1[] = {
	{
		/* HECI1 not yet implemented. */
	},
	{
		.name = "mei-gscfi",
		.bar = DG1_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

static const struct gsc_def gsc_def_xehpsdv[] = {
	{
		/* HECI1 not enabled on the device. */
	},
	{
		.name = "mei-gscfi",
		.bar = DG1_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
		.use_polling = true,
		.slow_firmware = true,
	}
};

static const struct gsc_def gsc_def_dg2[] = {
	{
		.name = "mei-gsc",
		.bar = DG2_GSC_HECI1_BASE,
		.bar_size = GSC_BAR_LENGTH,
		.lmem_size = SZ_4M,
	},
	{
		.name = "mei-gscfi",
		.bar = DG2_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

static void gsc_release_dev(struct device *dev)
{
	STUB();
#ifdef notyet
	struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
	struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);

	kfree(adev);
#endif
}

static void gsc_destroy_one(struct drm_i915_private *i915,
			    struct intel_gsc *gsc, unsigned int intf_id)
{
	STUB();
#ifdef notyet
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];

	if (intf->adev) {
		auxiliary_device_delete(&intf->adev->aux_dev);
		auxiliary_device_uninit(&intf->adev->aux_dev);
		intf->adev = NULL;
	}

	if (intf->irq >= 0)
		irq_free_desc(intf->irq);
	intf->irq = -1;

	gsc_ext_om_destroy(intf);
#endif
}

static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
			 unsigned int intf_id)
{
	STUB();
#ifdef notyet
	struct pci_dev *pdev = i915->drm.pdev;
	struct mei_aux_device *adev;
	struct auxiliary_device *aux_dev;
	const struct gsc_def *def;
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];
	int ret;

	intf->irq = -1;
	intf->id = intf_id;

	if (intf_id == 0 && !HAS_HECI_PXP(i915))
		return;

	if (IS_DG1(i915)) {
		def = &gsc_def_dg1[intf_id];
	} else if (IS_XEHPSDV(i915)) {
		def = &gsc_def_xehpsdv[intf_id];
	} else if (IS_DG2(i915)) {
		def = &gsc_def_dg2[intf_id];
	} else {
		drm_warn_once(&i915->drm, "Unknown platform\n");
		return;
	}

	if (!def->name) {
		drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
		return;
	}

	/* skip irq initialization */
	if (def->use_polling)
		goto add_device;

	intf->irq = irq_alloc_desc(0);
	if (intf->irq < 0) {
		drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
		goto fail;
	}

	ret = gsc_irq_init(intf->irq);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
		goto fail;
	}

add_device:
	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		goto fail;

	if (def->lmem_size) {
		drm_dbg(&i915->drm, "setting up GSC lmem\n");

		if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
			drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
			kfree(adev);
			goto fail;
		}

		adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
		adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
	}

	adev->irq = intf->irq;
	adev->bar.parent = &pdev->resource[0];
	adev->bar.start = def->bar + pdev->resource[0].start;
	adev->bar.end = adev->bar.start + def->bar_size - 1;
	adev->bar.flags = IORESOURCE_MEM;
	adev->bar.desc = IORES_DESC_NONE;
	adev->slow_firmware = def->slow_firmware;

	aux_dev = &adev->aux_dev;
	aux_dev->name = def->name;
	aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
		      PCI_DEVID(pdev->bus->number, pdev->devfn);
	aux_dev->dev.parent = &pdev->dev;
	aux_dev->dev.release = gsc_release_dev;

	ret = auxiliary_device_init(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
		kfree(adev);
		goto fail;
	}

	ret = auxiliary_device_add(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
		/* adev will be freed with the put_device() and .release sequence */
		auxiliary_device_uninit(aux_dev);
		goto fail;
	}
	intf->adev = adev;

	return;
fail:
	gsc_destroy_one(i915, gsc, intf->id);
#endif
}

static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
{
	STUB();
#ifdef notyet
	int ret;

	if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
		drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
		return;
	}

	if (!HAS_HECI_GSC(gt->i915)) {
		drm_warn_once(&gt->i915->drm, "GSC irq: not supported");
		return;
	}

	if (gt->gsc.intf[intf_id].irq < 0)
		return;

	ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
	if (ret)
		drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
#endif
}

void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (iir & GSC_IRQ_INTF(0))
		gsc_irq_handler(gt, 0);
	if (iir & GSC_IRQ_INTF(1))
		gsc_irq_handler(gt, 1);
}

void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
{
	unsigned int i;

	if (!HAS_HECI_GSC(i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_init_one(i915, gsc, i);
}

void intel_gsc_fini(struct intel_gsc *gsc)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	unsigned int i;

	if (!HAS_HECI_GSC(gt->i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_destroy_one(gt->i915, gsc, i);
}