/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code reuse from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

/*
 * An MSI hwirq is composed of an SRS (shared register select) field and
 * an IBS (interrupt bit select) field.  The field positions differ
 * between the classic MSIIR layout and the MSIIR1 (fsl,mpic-msi-v4.3)
 * layout, so the shifts are stored per controller in struct fsl_msi
 * (srs_shift/ibs_shift) and selected at probe time.
 */
#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

/* Build a hwirq from an MSIR register index and a bit index within it */
#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))

/* All probed MSI controllers; searched by fsl_setup_msi_irqs() */
static LIST_HEAD(msi_head);

/* Per-compatible constants, attached through of_device_id->data */
struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

/*
 * Handler data for one chained cascade interrupt: the owning controller
 * and the index of the MSIR register this cascade serves.
 */
struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
};

/* Read the 32-bit big-endian MSIR register at byte offset 'reg' */
static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * We do not need this actually. The MSIR register has been read once
 * in the cascade interrupt. So, this MSI interrupt has been acked
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

/* irq_chip for the per-MSI virqs; mask/unmask go through the PCI MSI ops */
static struct irq_chip fsl_msi_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_ack	= fsl_msi_end_irq,
	.name		= "FSL-MSI",
};

/*
 * irq_domain .map callback: attach the MSI irq_chip and the edge handler
 * to a newly created virq, and stash the owning controller as chip data
 * (fsl_teardown_msi_irqs() retrieves it via irq_get_chip_data()).
 */
static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
				irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

/*
 * Create the hwirq bitmap for one controller.  All hwirqs start out
 * reserved; fsl_msi_setup_hwirq() later releases the ones that are
 * actually backed by a wired-up MSIR register.
 */
static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc, hwirq;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
			      msi_data->irqhost->of_node);
	if (rc)
		return rc;

	/*
	 * Reserve all the hwirqs
	 * The available hwirqs will be released in fsl_msi_setup_hwirq()
	 */
	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);

	return 0;
}

/* ppc_md hook: undo fsl_setup_msi_irqs() for every MSI entry of a device */
static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		/* chip data is the owning fsl_msi, set in fsl_msi_host_map() */
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		msi_bitmap_free_hwirqs(&msi_data->bitmap,
				       virq_to_hw(entry->irq), 1);
		irq_dispose_mapping(entry->irq);
	}

	return;
}

/*
 * Fill in the MSI message a device must write to raise 'hwirq': the
 * address is the physical address of the controller's MSIIR register,
 * the data payload is the hwirq number itself.
 */
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address; /* Physical address of the MSIIR */
	int len;
	const __be64 *reg;
	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

	/* The hwirq itself is the MSI data payload the device writes */
	msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}

/*
 * ppc_md hook: allocate one hwirq + virq pair for every msi_desc of
 * 'pdev' and program the device's MSI message.  Returns 0 on success or
 * a negative errno; partially set up entries are freed by the caller
 * (see the out_free comment below).
 */
static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	if (type == PCI_CAP_ID_MSIX)
		pr_debug("fslmsi: MSI-X untested, trying anyway.\n");

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 *
	 * NOTE(review): only "fsl,mpic-msi" and "fsl,vmpic-msi" are
	 * accepted here, although the probe path also matches
	 * "fsl,mpic-msi-v4.3" — confirm whether v4.3 nodes are a valid
	 * fsl,msi target.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %s has an invalid fsl,msi phandle %u\n",
				hose->dn->full_name, np->phandle);
			return -EINVAL;
		}
	}

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle.
			 * Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (virq == NO_IRQ) {
			dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* free by the caller of this function */
	return rc;
}

/*
 * Chained handler for one MSIR register.  Reads the pending-bit word for
 * this register (via MMIO or, under the hypervisor, a hypercall) and
 * dispatches a generic_handle_irq() for every set bit.
 */
static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data;

	cascade_data = irq_get_handler_data(irq);
	msi_data = cascade_data->msi_data;

	raw_spin_lock(&desc->lock);
	/* IPIC has no automatic ack; mask+ack the cascade by hand */
	if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
		if (chip->irq_mask_ack)
			chip->irq_mask_ack(idata);
		else {
			chip->irq_mask(idata);
			chip->irq_ack(idata);
		}
	}

	/* Bail out if this cascade is already being handled */
	if (unlikely(irqd_irq_inprogress(idata)))
		goto unlock;

	msir_index = cascade_data->index;

	/*
	 * NOTE(review): this bound check is only a dead store —
	 * cascade_irq is unconditionally reassigned in the dispatch loop
	 * below and the register read still runs with the out-of-range
	 * index.  Looks like it was meant to bail out; confirm against
	 * upstream history before changing.
	 */
	if (msir_index >= NR_MSI_REG_MAX)
		cascade_irq = NO_IRQ;

	irqd_set_chained_irq_inprogress(idata);
	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case
	     FSL_PIC_IP_MPIC:
		/* MPIC MSIR registers are spaced 0x10 bytes apart */
		msir_value = fsl_msi_read(msi_data->msi_regs,
			msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		/* IPIC MSIR registers are spaced 0x4 bytes apart */
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		/* No MMIO access under the hypervisor; use a hypercall */
		unsigned int ret;
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
			       "irq %u (ret=%u)\n", irq, ret);
			msir_value = 0;
		}
		break;
	}
#endif
	}

	/* Dispatch every set bit, lowest first, shifting as we consume them */
	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msi_hwirq(msi_data, msir_index,
					  intr_index + have_shift));
		if (cascade_irq != NO_IRQ)
			generic_handle_irq(cascade_irq);
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}
	irqd_clr_chained_irq_inprogress(idata);

	/* Let the parent PIC know we are done with the cascade */
	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
	case FSL_PIC_IP_VMPIC:
		chip->irq_eoi(idata);
		break;
	case FSL_PIC_IP_IPIC:
		if (!irqd_irq_disabled(idata) && chip->irq_unmask)
			chip->irq_unmask(idata);
		break;
	}
unlock:
	raw_spin_unlock(&desc->lock);
}

/*
 * Tear down one MSI controller.  Also used as the probe error path, so
 * it must cope with a partially initialized fsl_msi (kzalloc'd, hence
 * the NULL/zero checks before each release step).
 */
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;
	struct fsl_msi_cascade_data *cascade_data;

	/* Only unlink if the node was ever added to msi_head */
	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG_MAX; i++) {
		virq = msi->msi_virqs[i];
		if (virq != NO_IRQ) {
			cascade_data = irq_get_handler_data(virq);
			kfree(cascade_data);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	/* VMPIC controllers never mapped MMIO registers */
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

static struct lock_class_key fsl_msi_irq_class;

/*
 * Wire up the cascade interrupt for the MSIR register at 'offset' and
 * release its hwirqs in the bitmap.  'irq_index' selects which interrupt
 * of the msi node (and which msi_virqs slot) to use.
 */
static int fsl_msi_setup_hwirq(struct
				fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir, i;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (virt_msir == NO_IRQ) {
		/* Not fatal: this MSIR bank just stays reserved */
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
	msi->msi_virqs[irq_index] = virt_msir;
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	irq_set_handler_data(virt_msir, cascade_data);
	irq_set_chained_handler(virt_msir, fsl_msi_cascade);

	/* Release the hwirqs corresponding to this MSI register */
	for (i = 0; i < IRQS_PER_MSI_REG; i++)
		msi_bitmap_free_hwirqs(&msi->bitmap,
				       msi_hwirq(msi, offset, i), 1);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
/*
 * Probe one MSI controller node: map its registers, create the irq
 * domain and hwirq bitmap, wire up the cascade interrupts described by
 * the device tree, and install the ppc_md MSI hooks.
 */
static int fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res, msiir;
	int err, i, j, irq_index, count;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
412 goto error_out; 413 } 414 415 /* 416 * Under the Freescale hypervisor, the msi nodes don't have a 'reg' 417 * property. Instead, we use hypercalls to access the MSI. 418 */ 419 if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) { 420 err = of_address_to_resource(dev->dev.of_node, 0, &res); 421 if (err) { 422 dev_err(&dev->dev, "invalid resource for node %s\n", 423 dev->dev.of_node->full_name); 424 goto error_out; 425 } 426 427 msi->msi_regs = ioremap(res.start, resource_size(&res)); 428 if (!msi->msi_regs) { 429 err = -ENOMEM; 430 dev_err(&dev->dev, "could not map node %s\n", 431 dev->dev.of_node->full_name); 432 goto error_out; 433 } 434 msi->msiir_offset = 435 features->msiir_offset + (res.start & 0xfffff); 436 437 /* 438 * First read the MSIIR/MSIIR1 offset from dts 439 * On failure use the hardcode MSIIR offset 440 */ 441 if (of_address_to_resource(dev->dev.of_node, 1, &msiir)) 442 msi->msiir_offset = features->msiir_offset + 443 (res.start & MSIIR_OFFSET_MASK); 444 else 445 msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK; 446 } 447 448 msi->feature = features->fsl_pic_ip; 449 450 /* 451 * Remember the phandle, so that we can match with any PCI nodes 452 * that have an "fsl,msi" property. 
453 */ 454 msi->phandle = dev->dev.of_node->phandle; 455 456 err = fsl_msi_init_allocator(msi); 457 if (err) { 458 dev_err(&dev->dev, "Error allocating MSI bitmap\n"); 459 goto error_out; 460 } 461 462 p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); 463 464 if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3")) { 465 msi->srs_shift = MSIIR1_SRS_SHIFT; 466 msi->ibs_shift = MSIIR1_IBS_SHIFT; 467 if (p) 468 dev_warn(&dev->dev, "%s: dose not support msi-available-ranges property\n", 469 __func__); 470 471 for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1; 472 irq_index++) { 473 err = fsl_msi_setup_hwirq(msi, dev, 474 irq_index, irq_index); 475 if (err) 476 goto error_out; 477 } 478 } else { 479 static const u32 all_avail[] = 480 { 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG }; 481 482 msi->srs_shift = MSIIR_SRS_SHIFT; 483 msi->ibs_shift = MSIIR_IBS_SHIFT; 484 485 if (p && len % (2 * sizeof(u32)) != 0) { 486 dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n", 487 __func__); 488 err = -EINVAL; 489 goto error_out; 490 } 491 492 if (!p) { 493 p = all_avail; 494 len = sizeof(all_avail); 495 } 496 497 for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) { 498 if (p[i * 2] % IRQS_PER_MSI_REG || 499 p[i * 2 + 1] % IRQS_PER_MSI_REG) { 500 pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n", 501 __func__, dev->dev.of_node->full_name, 502 p[i * 2 + 1], p[i * 2]); 503 err = -EINVAL; 504 goto error_out; 505 } 506 507 offset = p[i * 2] / IRQS_PER_MSI_REG; 508 count = p[i * 2 + 1] / IRQS_PER_MSI_REG; 509 510 for (j = 0; j < count; j++, irq_index++) { 511 err = fsl_msi_setup_hwirq(msi, dev, offset + j, 512 irq_index); 513 if (err) 514 goto error_out; 515 } 516 } 517 } 518 519 list_add_tail(&msi->list, &msi_head); 520 521 /* The multiple setting ppc_md.setup_msi_irqs will not harm things */ 522 if (!ppc_md.setup_msi_irqs) { 523 ppc_md.setup_msi_irqs = fsl_setup_msi_irqs; 524 ppc_md.teardown_msi_irqs = 
			fsl_teardown_msi_irqs;
	} else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
		dev_err(&dev->dev, "Different MSI driver already installed!\n");
		err = -ENODEV;
		goto error_out;
	}
	return 0;
error_out:
	/* fsl_of_msi_remove() tolerates a partially initialized msi */
	fsl_of_msi_remove(dev);
	return err;
}

/* Per-PIC-type constants: PIC flavor plus fallback MSIIR offset */
static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,mpic-msi-v4.3",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.owner = THIS_MODULE,
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

/* Register early so MSIs are available before PCI device drivers bind */
subsys_initcall(fsl_of_msi_init);