1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2006 Yahoo!, Inc. 5 * All rights reserved. 6 * Written by: John Baldwin <jhb@FreeBSD.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 */ 32 33 /* 34 * Support for PCI Message Signalled Interrupts (MSI). MSI interrupts on 35 * x86 are basically APIC messages that the northbridge delivers directly 36 * to the local APICs as if they had come from an I/O APIC. 
37 */ 38 39 #include <sys/cdefs.h> 40 __FBSDID("$FreeBSD$"); 41 42 #include "opt_acpi.h" 43 44 #include <sys/param.h> 45 #include <sys/bus.h> 46 #include <sys/kernel.h> 47 #include <sys/limits.h> 48 #include <sys/lock.h> 49 #include <sys/malloc.h> 50 #include <sys/mutex.h> 51 #include <sys/sx.h> 52 #include <sys/sysctl.h> 53 #include <sys/systm.h> 54 #include <x86/apicreg.h> 55 #include <machine/cputypes.h> 56 #include <machine/md_var.h> 57 #include <machine/frame.h> 58 #include <machine/intr_machdep.h> 59 #include <x86/apicvar.h> 60 #include <x86/iommu/iommu_intrmap.h> 61 #include <machine/specialreg.h> 62 #include <dev/pci/pcivar.h> 63 64 /* Fields in address for Intel MSI messages. */ 65 #define MSI_INTEL_ADDR_DEST 0x000ff000 66 #define MSI_INTEL_ADDR_RH 0x00000008 67 # define MSI_INTEL_ADDR_RH_ON 0x00000008 68 # define MSI_INTEL_ADDR_RH_OFF 0x00000000 69 #define MSI_INTEL_ADDR_DM 0x00000004 70 # define MSI_INTEL_ADDR_DM_PHYSICAL 0x00000000 71 # define MSI_INTEL_ADDR_DM_LOGICAL 0x00000004 72 73 /* Fields in data for Intel MSI messages. */ 74 #define MSI_INTEL_DATA_TRGRMOD IOART_TRGRMOD /* Trigger mode. */ 75 # define MSI_INTEL_DATA_TRGREDG IOART_TRGREDG 76 # define MSI_INTEL_DATA_TRGRLVL IOART_TRGRLVL 77 #define MSI_INTEL_DATA_LEVEL 0x00004000 /* Polarity. */ 78 # define MSI_INTEL_DATA_DEASSERT 0x00000000 79 # define MSI_INTEL_DATA_ASSERT 0x00004000 80 #define MSI_INTEL_DATA_DELMOD IOART_DELMOD /* Delivery mode. */ 81 # define MSI_INTEL_DATA_DELFIXED IOART_DELFIXED 82 # define MSI_INTEL_DATA_DELLOPRI IOART_DELLOPRI 83 # define MSI_INTEL_DATA_DELSMI IOART_DELSMI 84 # define MSI_INTEL_DATA_DELNMI IOART_DELNMI 85 # define MSI_INTEL_DATA_DELINIT IOART_DELINIT 86 # define MSI_INTEL_DATA_DELEXINT IOART_DELEXINT 87 #define MSI_INTEL_DATA_INTVEC IOART_INTVEC /* Interrupt vector. */ 88 89 /* 90 * Build Intel MSI message and data values from a source. AMD64 systems 91 * seem to be compatible, so we use the same function for both. 
 */
#define	INTEL_ADDR(msi)						\
	(MSI_INTEL_ADDR_BASE | (msi)->msi_cpu << 12 |		\
	    MSI_INTEL_ADDR_RH_OFF | MSI_INTEL_ADDR_DM_PHYSICAL)
#define	INTEL_DATA(msi)						\
	(MSI_INTEL_DATA_TRGREDG | MSI_INTEL_DATA_DELFIXED | (msi)->msi_vector)

static MALLOC_DEFINE(M_MSI, "msi", "PCI MSI");

/*
 * MSI sources are bunched into groups.  This is because MSI forces
 * all of the messages to share the address and data registers and
 * thus certain properties (such as the local APIC ID target on x86).
 * Each group has a 'first' source that contains information global to
 * the group.  These fields are marked with (g) below.
 *
 * Note that local APIC ID is kind of special.  Each message will be
 * assigned an ID by the system; however, a group will use the ID from
 * the first message.
 *
 * For MSI-X, each message is isolated.
 */
struct msi_intsrc {
	struct intsrc msi_intsrc;	/* Generic source; must stay first so
					   'struct intsrc *' casts work. */
	device_t msi_dev;		/* Owning device. (g) */
	struct msi_intsrc *msi_first;	/* First source in group. */
	u_int msi_irq;			/* IRQ cookie. */
	u_int msi_msix;			/* MSI-X message. */
	u_int msi_vector:8;		/* IDT vector. */
	u_int msi_cpu;			/* Local APIC ID. (g) */
	u_int msi_count:8;		/* Messages in this group. (g) */
	u_int msi_maxcount:8;		/* Alignment for this group. (g) */
	u_int *msi_irqs;		/* Group's IRQ list. (g) */
	u_int msi_remap_cookie;		/* IOMMU interrupt-remapping cookie,
					   used only under ACPI_DMAR. */
};

/* PIC method implementations, defined below. */
static void msi_create_source(void);
static void msi_enable_source(struct intsrc *isrc);
static void msi_disable_source(struct intsrc *isrc, int eoi);
static void msi_eoi_source(struct intsrc *isrc);
static void msi_enable_intr(struct intsrc *isrc);
static void msi_disable_intr(struct intsrc *isrc);
static int msi_vector(struct intsrc *isrc);
static int msi_source_pending(struct intsrc *isrc);
static int msi_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol);
static int msi_assign_cpu(struct intsrc *isrc, u_int apic_id);

/* Method table registering MSI sources with the interrupt framework. */
struct pic msi_pic = {
	.pic_enable_source = msi_enable_source,
	.pic_disable_source = msi_disable_source,
	.pic_eoi_source = msi_eoi_source,
	.pic_enable_intr = msi_enable_intr,
	.pic_disable_intr = msi_disable_intr,
	.pic_vector = msi_vector,
	.pic_source_pending = msi_source_pending,
	.pic_suspend = NULL,
	.pic_resume = NULL,
	.pic_config_intr = msi_config_intr,
	.pic_assign_cpu = msi_assign_cpu,
	.pic_reprogram_pin = NULL,
};

u_int first_msi_irq;
SYSCTL_UINT(_machdep, OID_AUTO, first_msi_irq, CTLFLAG_RD, &first_msi_irq, 0,
    "Number of first IRQ reserved for MSI and MSI-X interrupts");

u_int num_msi_irqs = 512;
SYSCTL_UINT(_machdep, OID_AUTO, num_msi_irqs, CTLFLAG_RDTUN, &num_msi_irqs, 0,
    "Number of IRQs reserved for MSI and MSI-X interrupts");

#ifdef SMP
/**
 * Xen hypervisors prior to 4.6.0 do not properly handle updates to
 * enabled MSI-X table entries.  Allow migration of MSI-X interrupts
 * to be disabled via a tunable.
Values have the following meaning:
 *
 * -1: automatic detection by FreeBSD
 *  0: enable migration
 *  1: disable migration
 */
int msix_disable_migration = -1;
SYSCTL_INT(_machdep, OID_AUTO, disable_msix_migration, CTLFLAG_RDTUN,
    &msix_disable_migration, 0,
    "Disable migration of MSI-X interrupts between CPUs");
#endif

static int msi_enabled;		/* Set once msi_init() completes. */
static u_int msi_last_irq;	/* Number of sources created so far. */
static struct mtx msi_lock;	/* Protects MSI allocation state. */

/* MSI sources need no per-source enable step; intentionally empty. */
static void
msi_enable_source(struct intsrc *isrc)
{
}

/* No mask operation either; only EOI the local APIC when requested. */
static void
msi_disable_source(struct intsrc *isrc, int eoi)
{

	if (eoi == PIC_EOI)
		lapic_eoi();
}

/* Acknowledge the interrupt at the local APIC. */
static void
msi_eoi_source(struct intsrc *isrc)
{

	lapic_eoi();
}

/* Enable this source's IDT vector on its assigned CPU. */
static void
msi_enable_intr(struct intsrc *isrc)
{
	struct msi_intsrc *msi = (struct msi_intsrc *)isrc;

	apic_enable_vector(msi->msi_cpu, msi->msi_vector);
}

/* Disable this source's IDT vector on its assigned CPU. */
static void
msi_disable_intr(struct intsrc *isrc)
{
	struct msi_intsrc *msi = (struct msi_intsrc *)isrc;

	apic_disable_vector(msi->msi_cpu, msi->msi_vector);
}

/* Return the IRQ cookie identifying this source. */
static int
msi_vector(struct intsrc *isrc)
{
	struct msi_intsrc *msi = (struct msi_intsrc *)isrc;

	return (msi->msi_irq);
}

/* MSI messages expose no pending state; always report not pending. */
static int
msi_source_pending(struct intsrc *isrc)
{

	return (0);
}

/* Trigger mode and polarity are fixed for MSI; not configurable. */
static int
msi_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{

	return (ENODEV);
}

/*
 * Move an MSI group (or a single MSI-X message) to a new local APIC.
 * New vectors are allocated and enabled on the destination CPU before
 * the old ones are torn down so that no interrupt is missed during the
 * transition.
 */
static int
msi_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
	struct msi_intsrc *sib, *msi = (struct msi_intsrc *)isrc;
	int old_vector;
	u_int old_id;
	int i, vector;

	/*
	 * Only allow CPUs to be assigned to the first message for an
	 * MSI group.
	 */
	if (msi->msi_first != msi)
		return (EINVAL);

#ifdef SMP
	if (msix_disable_migration && msi->msi_msix)
		return (EINVAL);
#endif

	/* Store information to free existing irq. */
	old_vector = msi->msi_vector;
	old_id = msi->msi_cpu;
	if (old_id == apic_id)
		return (0);

	/* Allocate IDT vectors on this cpu. */
	if (msi->msi_count > 1) {
		KASSERT(msi->msi_msix == 0, ("MSI-X message group"));
		vector = apic_alloc_vectors(apic_id, msi->msi_irqs,
		    msi->msi_count, msi->msi_maxcount);
	} else
		vector = apic_alloc_vector(apic_id, msi->msi_irq);
	if (vector == 0)
		return (ENOSPC);

	/* Retarget the first message, then each sibling in the group. */
	msi->msi_cpu = apic_id;
	msi->msi_vector = vector;
	if (msi->msi_intsrc.is_handlers > 0)
		apic_enable_vector(msi->msi_cpu, msi->msi_vector);
	if (bootverbose)
		printf("msi: Assigning %s IRQ %d to local APIC %u vector %u\n",
		    msi->msi_msix ? "MSI-X" : "MSI", msi->msi_irq,
		    msi->msi_cpu, msi->msi_vector);
	for (i = 1; i < msi->msi_count; i++) {
		sib = (struct msi_intsrc *)intr_lookup_source(msi->msi_irqs[i]);
		sib->msi_cpu = apic_id;
		sib->msi_vector = vector + i;
		if (sib->msi_intsrc.is_handlers > 0)
			apic_enable_vector(sib->msi_cpu, sib->msi_vector);
		if (bootverbose)
			printf(
		    "msi: Assigning MSI IRQ %d to local APIC %u vector %u\n",
			    sib->msi_irq, sib->msi_cpu, sib->msi_vector);
	}
	/* Have the bus re-program the device's MSI registers. */
	BUS_REMAP_INTR(device_get_parent(msi->msi_dev), msi->msi_dev,
	    msi->msi_irq);

	/*
	 * Free the old vector after the new one is established.  This is done
	 * to prevent races where we could miss an interrupt.
	 */
	if (msi->msi_intsrc.is_handlers > 0)
		apic_disable_vector(old_id, old_vector);
	apic_free_vector(old_id, old_vector, msi->msi_irq);
	for (i = 1; i < msi->msi_count; i++) {
		sib = (struct msi_intsrc *)intr_lookup_source(msi->msi_irqs[i]);
		if (sib->msi_intsrc.is_handlers > 0)
			apic_disable_vector(old_id, old_vector + i);
		apic_free_vector(old_id, old_vector + i, msi->msi_irqs[i]);
	}
	return (0);
}

/*
 * Global MSI initialization: verify the CPU vendor is supported,
 * reserve num_msi_irqs IRQ numbers starting at first_msi_irq, and
 * register the MSI PIC with the interrupt framework.
 */
void
msi_init(void)
{

	/* Check if we have a supported CPU. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
		break;
	case CPU_VENDOR_CENTAUR:
		if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf)
			break;
		/* FALLTHROUGH */
	default:
		return;
	}

#ifdef SMP
	if (msix_disable_migration == -1) {
		/* The default is to allow migration of MSI-X interrupts. */
		msix_disable_migration = 0;
	}
#endif

	if (num_msi_irqs == 0)
		return;

	first_msi_irq = num_io_irqs;
	if (num_msi_irqs > UINT_MAX - first_msi_irq)
		panic("num_msi_irqs too high");
	num_io_irqs = first_msi_irq + num_msi_irqs;

	msi_enabled = 1;
	intr_register_pic(&msi_pic);
	mtx_init(&msi_lock, "msi", NULL, MTX_DEF);
}

/*
 * Allocate and register one new MSI interrupt source, consuming the
 * next IRQ number from the reserved range.  Quietly does nothing once
 * the range is exhausted.
 */
static void
msi_create_source(void)
{
	struct msi_intsrc *msi;
	u_int irq;

	mtx_lock(&msi_lock);
	if (msi_last_irq >= num_msi_irqs) {
		mtx_unlock(&msi_lock);
		return;
	}
	irq = msi_last_irq + first_msi_irq;
	msi_last_irq++;
	mtx_unlock(&msi_lock);

	msi = malloc(sizeof(struct msi_intsrc), M_MSI, M_WAITOK | M_ZERO);
	msi->msi_intsrc.is_pic = &msi_pic;
	msi->msi_irq = irq;
	intr_register_source(&msi->msi_intsrc);
	nexus_add_irq(irq);
}

/*
 * Try to allocate 'count' interrupt sources with contiguous IDT values.
378 */ 379 int 380 msi_alloc(device_t dev, int count, int maxcount, int *irqs) 381 { 382 struct msi_intsrc *msi, *fsrc; 383 u_int cpu, domain, *mirqs; 384 int cnt, i, vector; 385 #ifdef ACPI_DMAR 386 u_int cookies[count]; 387 int error; 388 #endif 389 390 if (!msi_enabled) 391 return (ENXIO); 392 393 if (bus_get_domain(dev, &domain) != 0) 394 domain = 0; 395 396 if (count > 1) 397 mirqs = malloc(count * sizeof(*mirqs), M_MSI, M_WAITOK); 398 else 399 mirqs = NULL; 400 again: 401 mtx_lock(&msi_lock); 402 403 /* Try to find 'count' free IRQs. */ 404 cnt = 0; 405 for (i = first_msi_irq; i < first_msi_irq + num_msi_irqs; i++) { 406 msi = (struct msi_intsrc *)intr_lookup_source(i); 407 408 /* End of allocated sources, so break. */ 409 if (msi == NULL) 410 break; 411 412 /* If this is a free one, save its IRQ in the array. */ 413 if (msi->msi_dev == NULL) { 414 irqs[cnt] = i; 415 cnt++; 416 if (cnt == count) 417 break; 418 } 419 } 420 421 /* Do we need to create some new sources? */ 422 if (cnt < count) { 423 /* If we would exceed the max, give up. */ 424 if (i + (count - cnt) > first_msi_irq + num_msi_irqs) { 425 mtx_unlock(&msi_lock); 426 free(mirqs, M_MSI); 427 return (ENXIO); 428 } 429 mtx_unlock(&msi_lock); 430 431 /* We need count - cnt more sources. */ 432 while (cnt < count) { 433 msi_create_source(); 434 cnt++; 435 } 436 goto again; 437 } 438 439 /* Ok, we now have the IRQs allocated. */ 440 KASSERT(cnt == count, ("count mismatch")); 441 442 /* Allocate 'count' IDT vectors. 
*/ 443 cpu = intr_next_cpu(domain); 444 vector = apic_alloc_vectors(cpu, irqs, count, maxcount); 445 if (vector == 0) { 446 mtx_unlock(&msi_lock); 447 free(mirqs, M_MSI); 448 return (ENOSPC); 449 } 450 451 #ifdef ACPI_DMAR 452 mtx_unlock(&msi_lock); 453 error = iommu_alloc_msi_intr(dev, cookies, count); 454 mtx_lock(&msi_lock); 455 if (error == EOPNOTSUPP) 456 error = 0; 457 if (error != 0) { 458 for (i = 0; i < count; i++) 459 apic_free_vector(cpu, vector + i, irqs[i]); 460 free(mirqs, M_MSI); 461 return (error); 462 } 463 for (i = 0; i < count; i++) { 464 msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]); 465 msi->msi_remap_cookie = cookies[i]; 466 } 467 #endif 468 469 /* Assign IDT vectors and make these messages owned by 'dev'. */ 470 fsrc = (struct msi_intsrc *)intr_lookup_source(irqs[0]); 471 for (i = 0; i < count; i++) { 472 msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]); 473 msi->msi_cpu = cpu; 474 msi->msi_dev = dev; 475 msi->msi_vector = vector + i; 476 if (bootverbose) 477 printf( 478 "msi: routing MSI IRQ %d to local APIC %u vector %u\n", 479 msi->msi_irq, msi->msi_cpu, msi->msi_vector); 480 msi->msi_first = fsrc; 481 KASSERT(msi->msi_intsrc.is_handlers == 0, 482 ("dead MSI has handlers")); 483 } 484 fsrc->msi_count = count; 485 fsrc->msi_maxcount = maxcount; 486 if (count > 1) 487 bcopy(irqs, mirqs, count * sizeof(*mirqs)); 488 fsrc->msi_irqs = mirqs; 489 mtx_unlock(&msi_lock); 490 return (0); 491 } 492 493 int 494 msi_release(int *irqs, int count) 495 { 496 struct msi_intsrc *msi, *first; 497 int i; 498 499 mtx_lock(&msi_lock); 500 first = (struct msi_intsrc *)intr_lookup_source(irqs[0]); 501 if (first == NULL) { 502 mtx_unlock(&msi_lock); 503 return (ENOENT); 504 } 505 506 /* Make sure this isn't an MSI-X message. */ 507 if (first->msi_msix) { 508 mtx_unlock(&msi_lock); 509 return (EINVAL); 510 } 511 512 /* Make sure this message is allocated to a group. 
 */
	if (first->msi_first == NULL) {
		mtx_unlock(&msi_lock);
		return (ENXIO);
	}

	/*
	 * Make sure this is the start of a group and that we are releasing
	 * the entire group.
	 */
	if (first->msi_first != first || first->msi_count != count) {
		mtx_unlock(&msi_lock);
		return (EINVAL);
	}
	KASSERT(first->msi_dev != NULL, ("unowned group"));

	/* Clear all the extra messages in the group. */
	for (i = 1; i < count; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(irqs[i]);
		KASSERT(msi->msi_first == first, ("message not in group"));
		KASSERT(msi->msi_dev == first->msi_dev, ("owner mismatch"));
#ifdef ACPI_DMAR
		iommu_unmap_msi_intr(first->msi_dev, msi->msi_remap_cookie);
#endif
		msi->msi_first = NULL;
		msi->msi_dev = NULL;
		apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq);
		msi->msi_vector = 0;
	}

	/* Clear out the first message. */
#ifdef ACPI_DMAR
	/* Drop the lock around the IOMMU unmap of the first message. */
	mtx_unlock(&msi_lock);
	iommu_unmap_msi_intr(first->msi_dev, first->msi_remap_cookie);
	mtx_lock(&msi_lock);
#endif
	first->msi_first = NULL;
	first->msi_dev = NULL;
	apic_free_vector(first->msi_cpu, first->msi_vector, first->msi_irq);
	first->msi_vector = 0;
	first->msi_count = 0;
	first->msi_maxcount = 0;
	free(first->msi_irqs, M_MSI);
	first->msi_irqs = NULL;

	mtx_unlock(&msi_lock);
	return (0);
}

/*
 * Return the address/data pair the device should program for 'irq'.
 * When interrupt remapping is in use (ACPI_DMAR), the IOMMU supplies
 * the values; otherwise the Intel-format address and data are built
 * directly from the source's local APIC ID and IDT vector.
 */
int
msi_map(int irq, uint64_t *addr, uint32_t *data)
{
	struct msi_intsrc *msi;
	int error;
#ifdef ACPI_DMAR
	struct msi_intsrc *msi1;
	int i, k;
#endif

	mtx_lock(&msi_lock);
	msi = (struct msi_intsrc *)intr_lookup_source(irq);
	if (msi == NULL) {
		mtx_unlock(&msi_lock);
		return (ENOENT);
	}

	/* Make sure this message is allocated to a device. */
	if (msi->msi_dev == NULL) {
		mtx_unlock(&msi_lock);
		return (ENXIO);
	}

	/*
	 * If this message isn't an MSI-X message, make sure it's part
	 * of a group, and switch to the first message in the
	 * group.
	 */
	if (!msi->msi_msix) {
		if (msi->msi_first == NULL) {
			mtx_unlock(&msi_lock);
			return (ENXIO);
		}
		msi = msi->msi_first;
	}

#ifdef ACPI_DMAR
	/*
	 * For an MSI group, also refresh the IOMMU remapping entries of
	 * the other messages in the group (skipping the first, which is
	 * mapped below).  'k' counts the siblings still to be found.
	 */
	if (!msi->msi_msix) {
		for (k = msi->msi_count - 1, i = first_msi_irq; k > 0 &&
		    i < first_msi_irq + num_msi_irqs; i++) {
			if (i == msi->msi_irq)
				continue;
			msi1 = (struct msi_intsrc *)intr_lookup_source(i);
			if (!msi1->msi_msix && msi1->msi_first == msi) {
				mtx_unlock(&msi_lock);
				iommu_map_msi_intr(msi1->msi_dev,
				    msi1->msi_cpu, msi1->msi_vector,
				    msi1->msi_remap_cookie, NULL, NULL);
				k--;
				mtx_lock(&msi_lock);
			}
		}
	}
	mtx_unlock(&msi_lock);
	error = iommu_map_msi_intr(msi->msi_dev, msi->msi_cpu,
	    msi->msi_vector, msi->msi_remap_cookie, addr, data);
#else
	mtx_unlock(&msi_lock);
	error = EOPNOTSUPP;
#endif
	/* No remapping available: build the values in Intel format. */
	if (error == EOPNOTSUPP) {
		*addr = INTEL_ADDR(msi);
		*data = INTEL_DATA(msi);
		error = 0;
	}
	return (error);
}

/*
 * Allocate a single isolated MSI-X message for 'dev', returning its
 * IRQ cookie in '*irq'.
 */
int
msix_alloc(device_t dev, int *irq)
{
	struct msi_intsrc *msi;
	u_int cpu, domain;
	int i, vector;
#ifdef ACPI_DMAR
	u_int cookie;
	int error;
#endif

	if (!msi_enabled)
		return (ENXIO);

	/* Fall back to domain 0 if the device has no NUMA domain. */
	if (bus_get_domain(dev, &domain) != 0)
		domain = 0;

again:
	mtx_lock(&msi_lock);

	/* Find a free IRQ. */
	for (i = first_msi_irq; i < first_msi_irq + num_msi_irqs; i++) {
		msi = (struct msi_intsrc *)intr_lookup_source(i);

		/* End of allocated sources, so break. */
		if (msi == NULL)
			break;

		/* Stop at the first free source. */
		if (msi->msi_dev == NULL)
			break;
	}

	/* Are all IRQs in use?
*/ 663 if (i == first_msi_irq + num_msi_irqs) { 664 mtx_unlock(&msi_lock); 665 return (ENXIO); 666 } 667 668 /* Do we need to create a new source? */ 669 if (msi == NULL) { 670 mtx_unlock(&msi_lock); 671 672 /* Create a new source. */ 673 msi_create_source(); 674 goto again; 675 } 676 677 /* Allocate an IDT vector. */ 678 cpu = intr_next_cpu(domain); 679 vector = apic_alloc_vector(cpu, i); 680 if (vector == 0) { 681 mtx_unlock(&msi_lock); 682 return (ENOSPC); 683 } 684 685 msi->msi_dev = dev; 686 #ifdef ACPI_DMAR 687 mtx_unlock(&msi_lock); 688 error = iommu_alloc_msi_intr(dev, &cookie, 1); 689 mtx_lock(&msi_lock); 690 if (error == EOPNOTSUPP) 691 error = 0; 692 if (error != 0) { 693 msi->msi_dev = NULL; 694 apic_free_vector(cpu, vector, i); 695 return (error); 696 } 697 msi->msi_remap_cookie = cookie; 698 #endif 699 700 if (bootverbose) 701 printf("msi: routing MSI-X IRQ %d to local APIC %u vector %u\n", 702 msi->msi_irq, cpu, vector); 703 704 /* Setup source. */ 705 msi->msi_cpu = cpu; 706 msi->msi_first = msi; 707 msi->msi_vector = vector; 708 msi->msi_msix = 1; 709 msi->msi_count = 1; 710 msi->msi_maxcount = 1; 711 msi->msi_irqs = NULL; 712 713 KASSERT(msi->msi_intsrc.is_handlers == 0, ("dead MSI-X has handlers")); 714 mtx_unlock(&msi_lock); 715 716 *irq = i; 717 return (0); 718 } 719 720 int 721 msix_release(int irq) 722 { 723 struct msi_intsrc *msi; 724 725 mtx_lock(&msi_lock); 726 msi = (struct msi_intsrc *)intr_lookup_source(irq); 727 if (msi == NULL) { 728 mtx_unlock(&msi_lock); 729 return (ENOENT); 730 } 731 732 /* Make sure this is an MSI-X message. */ 733 if (!msi->msi_msix) { 734 mtx_unlock(&msi_lock); 735 return (EINVAL); 736 } 737 738 KASSERT(msi->msi_dev != NULL, ("unowned message")); 739 740 /* Clear out the message. 
*/ 741 #ifdef ACPI_DMAR 742 mtx_unlock(&msi_lock); 743 iommu_unmap_msi_intr(msi->msi_dev, msi->msi_remap_cookie); 744 mtx_lock(&msi_lock); 745 #endif 746 msi->msi_first = NULL; 747 msi->msi_dev = NULL; 748 apic_free_vector(msi->msi_cpu, msi->msi_vector, msi->msi_irq); 749 msi->msi_vector = 0; 750 msi->msi_msix = 0; 751 msi->msi_count = 0; 752 msi->msi_maxcount = 0; 753 754 mtx_unlock(&msi_lock); 755 return (0); 756 } 757