1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * $FreeBSD$ 29 */ 30 /* 31 * This file and its contents are supplied under the terms of the 32 * Common Development and Distribution License ("CDDL"), version 1.0. 33 * You may only use this file in accordance with the terms of version 34 * 1.0 of the CDDL. 35 * 36 * A full copy of the text of the CDDL should have accompanied this 37 * source. A copy of the CDDL is also available via the Internet at 38 * http://www.illumos.org/license/CDDL. 39 * 40 * Copyright 2014 Pluribus Networks Inc. 41 * Copyright 2018 Joyent, Inc. 
42 */ 43 44 #include <sys/cdefs.h> 45 __FBSDID("$FreeBSD$"); 46 47 #include <sys/param.h> 48 #include <sys/linker_set.h> 49 50 #include <ctype.h> 51 #include <errno.h> 52 #include <pthread.h> 53 #include <stdio.h> 54 #include <stdlib.h> 55 #include <string.h> 56 #include <strings.h> 57 #include <assert.h> 58 #include <stdbool.h> 59 60 #include <machine/vmm.h> 61 #include <vmmapi.h> 62 63 #include "acpi.h" 64 #include "bhyverun.h" 65 #include "debug.h" 66 #include "inout.h" 67 #include "ioapic.h" 68 #include "mem.h" 69 #include "pci_emul.h" 70 #include "pci_irq.h" 71 #include "pci_lpc.h" 72 73 #define CONF1_ADDR_PORT 0x0cf8 74 #define CONF1_DATA_PORT 0x0cfc 75 76 #define CONF1_ENABLE 0x80000000ul 77 78 #define MAXBUSES (PCI_BUSMAX + 1) 79 #define MAXSLOTS (PCI_SLOTMAX + 1) 80 #define MAXFUNCS (PCI_FUNCMAX + 1) 81 82 struct funcinfo { 83 char *fi_name; 84 char *fi_param; 85 struct pci_devinst *fi_devi; 86 }; 87 88 struct intxinfo { 89 int ii_count; 90 int ii_pirq_pin; 91 int ii_ioapic_irq; 92 }; 93 94 struct slotinfo { 95 struct intxinfo si_intpins[4]; 96 struct funcinfo si_funcs[MAXFUNCS]; 97 }; 98 99 struct businfo { 100 uint16_t iobase, iolimit; /* I/O window */ 101 uint32_t membase32, memlimit32; /* mmio window below 4GB */ 102 uint64_t membase64, memlimit64; /* mmio window above 4GB */ 103 struct slotinfo slotinfo[MAXSLOTS]; 104 }; 105 106 static struct businfo *pci_businfo[MAXBUSES]; 107 108 SET_DECLARE(pci_devemu_set, struct pci_devemu); 109 110 static uint64_t pci_emul_iobase; 111 static uint64_t pci_emul_membase32; 112 static uint64_t pci_emul_membase64; 113 114 #define PCI_EMUL_IOBASE 0x2000 115 #define PCI_EMUL_IOLIMIT 0x10000 116 117 #define PCI_EMUL_ECFG_BASE 0xE0000000 /* 3.5GB */ 118 #define PCI_EMUL_ECFG_SIZE (MAXBUSES * 1024 * 1024) /* 1MB per bus */ 119 SYSRES_MEM(PCI_EMUL_ECFG_BASE, PCI_EMUL_ECFG_SIZE); 120 121 #define PCI_EMUL_MEMLIMIT32 PCI_EMUL_ECFG_BASE 122 123 #define PCI_EMUL_MEMBASE64 0xD000000000UL 124 #define PCI_EMUL_MEMLIMIT64 
0xFD00000000UL

static struct pci_devemu *pci_emul_finddev(char *name);
static void pci_lintr_route(struct pci_devinst *pi);
static void pci_lintr_update(struct pci_devinst *pi);
static void pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot,
    int func, int coff, int bytes, uint32_t *val);

/*
 * Store 'val' into the config space of 'pi' at offset 'coff' using an
 * access width of 'bytes': 1, 2, or (for any other value) 4.
 */
static __inline void
CFGWRITE(struct pci_devinst *pi, int coff, uint32_t val, int bytes)
{

	if (bytes == 1)
		pci_set_cfgdata8(pi, coff, val);
	else if (bytes == 2)
		pci_set_cfgdata16(pi, coff, val);
	else
		pci_set_cfgdata32(pi, coff, val);
}

/*
 * Fetch a 1, 2 or (default) 4 byte value from the config space of 'pi'
 * at offset 'coff', zero-extended to 32 bits.
 */
static __inline uint32_t
CFGREAD(struct pci_devinst *pi, int coff, int bytes)
{

	if (bytes == 1)
		return (pci_get_cfgdata8(pi, coff));
	else if (bytes == 2)
		return (pci_get_cfgdata16(pi, coff));
	else
		return (pci_get_cfgdata32(pi, coff));
}

/*
 * I/O access
 */

/*
 * Slot options are in the form:
 *
 * <bus>:<slot>:<func>,<emul>[,<config>]
 * <slot>[:<func>],<emul>[,<config>]
 *
 * slot is 0..31
 * func is 0..7
 * emul is a string describing the type of PCI device e.g. virtio-net
 * config is an optional string, depending on the device, that can be
 * used for configuration.
171 * Examples are: 172 * 1,virtio-net,tap0 173 * 3:0,dummy 174 */ 175 static void 176 pci_parse_slot_usage(char *aopt) 177 { 178 179 EPRINTLN("Invalid PCI slot info field \"%s\"", aopt); 180 } 181 182 int 183 pci_parse_slot(char *opt) 184 { 185 struct businfo *bi; 186 struct slotinfo *si; 187 char *emul, *config, *str, *cp; 188 int error, bnum, snum, fnum; 189 190 error = -1; 191 str = strdup(opt); 192 193 emul = config = NULL; 194 if ((cp = strchr(str, ',')) != NULL) { 195 *cp = '\0'; 196 emul = cp + 1; 197 if ((cp = strchr(emul, ',')) != NULL) { 198 *cp = '\0'; 199 config = cp + 1; 200 } 201 } else { 202 pci_parse_slot_usage(opt); 203 goto done; 204 } 205 206 /* <bus>:<slot>:<func> */ 207 if (sscanf(str, "%d:%d:%d", &bnum, &snum, &fnum) != 3) { 208 bnum = 0; 209 /* <slot>:<func> */ 210 if (sscanf(str, "%d:%d", &snum, &fnum) != 2) { 211 fnum = 0; 212 /* <slot> */ 213 if (sscanf(str, "%d", &snum) != 1) { 214 snum = -1; 215 } 216 } 217 } 218 219 if (bnum < 0 || bnum >= MAXBUSES || snum < 0 || snum >= MAXSLOTS || 220 fnum < 0 || fnum >= MAXFUNCS) { 221 pci_parse_slot_usage(opt); 222 goto done; 223 } 224 225 if (pci_businfo[bnum] == NULL) 226 pci_businfo[bnum] = calloc(1, sizeof(struct businfo)); 227 228 bi = pci_businfo[bnum]; 229 si = &bi->slotinfo[snum]; 230 231 if (si->si_funcs[fnum].fi_name != NULL) { 232 EPRINTLN("pci slot %d:%d already occupied!", 233 snum, fnum); 234 goto done; 235 } 236 237 if (pci_emul_finddev(emul) == NULL) { 238 EPRINTLN("pci slot %d:%d: unknown device \"%s\"", 239 snum, fnum, emul); 240 goto done; 241 } 242 243 error = 0; 244 si->si_funcs[fnum].fi_name = emul; 245 si->si_funcs[fnum].fi_param = config; 246 247 done: 248 if (error) 249 free(str); 250 251 return (error); 252 } 253 254 void 255 pci_print_supported_devices() 256 { 257 struct pci_devemu **pdpp, *pdp; 258 259 SET_FOREACH(pdpp, pci_devemu_set) { 260 pdp = *pdpp; 261 printf("%s\n", pdp->pe_emu); 262 } 263 } 264 265 static int 266 pci_valid_pba_offset(struct pci_devinst *pi, 
uint64_t offset)
{
	/*
	 * True only for offsets inside [pba_offset, pba_offset + pba_size).
	 */
	if (offset < pi->pi_msix.pba_offset)
		return (0);

	if (offset >= pi->pi_msix.pba_offset + pi->pi_msix.pba_size) {
		return (0);
	}

	return (1);
}

/*
 * Handle a guest write of 'size' bytes at 'offset' into the MSI-X table
 * BAR.  Returns 0 if the write was applied to the in-memory table and -1
 * for unsupported sizes, out-of-range table indices or unaligned writes.
 */
int
pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size,
    uint64_t value)
{
	int msix_entry_offset;
	int tab_index;
	char *dest;

	/* support only 4 or 8 byte writes */
	if (size != 4 && size != 8)
		return (-1);

	/*
	 * Return if table index is beyond what device supports
	 */
	tab_index = offset / MSIX_TABLE_ENTRY_SIZE;
	if (tab_index >= pi->pi_msix.table_count)
		return (-1);

	msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;

	/* support only aligned writes */
	if ((msix_entry_offset % size) != 0)
		return (-1);

	/* Locate the field within the selected table entry */
	dest = (char *)(pi->pi_msix.table + tab_index);
	dest += msix_entry_offset;

	if (size == 4)
		*((uint32_t *)dest) = value;
	else
		*((uint64_t *)dest) = value;

	return (0);
}

/*
 * Handle a guest read of 'size' bytes at 'offset' from the MSI-X table
 * BAR.  Reads of the PBA region return 0; anything unsupported returns
 * all-ones.
 */
uint64_t
pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size)
{
	char *dest;
	int msix_entry_offset;
	int tab_index;
	uint64_t retval = ~0;

	/*
	 * The PCI standard only allows 4 and 8 byte accesses to the MSI-X
	 * table but we also allow 1 byte access to accommodate reads from
	 * ddb.
 */
	if (size != 1 && size != 4 && size != 8)
		return (retval);

	msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;

	/* support only aligned reads */
	if ((msix_entry_offset % size) != 0) {
		return (retval);
	}

	tab_index = offset / MSIX_TABLE_ENTRY_SIZE;

	if (tab_index < pi->pi_msix.table_count) {
		/* valid MSI-X Table access */
		dest = (char *)(pi->pi_msix.table + tab_index);
		dest += msix_entry_offset;

		if (size == 1)
			retval = *((uint8_t *)dest);
		else if (size == 4)
			retval = *((uint32_t *)dest);
		else
			retval = *((uint64_t *)dest);
	} else if (pci_valid_pba_offset(pi, offset)) {
		/* return 0 for PBA access */
		retval = 0;
	}

	return (retval);
}

/* Return the BAR index holding the MSI-X table, or -1 if MSI-X is not set up. */
int
pci_msix_table_bar(struct pci_devinst *pi)
{

	if (pi->pi_msix.table != NULL)
		return (pi->pi_msix.table_bar);
	else
		return (-1);
}

/* Return the BAR index holding the MSI-X PBA, or -1 if MSI-X is not set up. */
int
pci_msix_pba_bar(struct pci_devinst *pi)
{

	if (pi->pi_msix.table != NULL)
		return (pi->pi_msix.pba_bar);
	else
		return (-1);
}

/*
 * Dispatch an i/o port access to the device whose i/o BAR contains
 * [port, port + bytes).  Returns 0 if some BAR claimed the access and
 * -1 otherwise.
 */
static int
pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
    uint32_t *eax, void *arg)
{
	struct pci_devinst *pdi = arg;
	struct pci_devemu *pe = pdi->pi_d;
	uint64_t offset;
	int i;

	for (i = 0; i <= PCI_BARMAX; i++) {
		if (pdi->pi_bar[i].type == PCIBAR_IO &&
		    port >= pdi->pi_bar[i].addr &&
		    port + bytes <= pdi->pi_bar[i].addr + pdi->pi_bar[i].size) {
			offset = port - pdi->pi_bar[i].addr;
			if (in)
				*eax = (*pe->pe_barread)(ctx, vcpu, pdi, i,
				    offset, bytes);
			else
				(*pe->pe_barwrite)(ctx, vcpu, pdi, i, offset,
				    bytes, *eax);
			return (0);
		}
	}
	return (-1);
}

/*
 * MMIO handler for memory BARs.  'arg2' carries the BAR index that was
 * registered for this range.  64-bit accesses are split into two 32-bit
 * device-model calls.
 */
static int
pci_emul_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
    int size, uint64_t *val, void *arg1, long arg2)
{
	struct pci_devinst *pdi = arg1;
	struct pci_devemu *pe =
pdi->pi_d;
	uint64_t offset;
	int bidx = (int) arg2;

	assert(bidx <= PCI_BARMAX);
	assert(pdi->pi_bar[bidx].type == PCIBAR_MEM32 ||
	    pdi->pi_bar[bidx].type == PCIBAR_MEM64);
	assert(addr >= pdi->pi_bar[bidx].addr &&
	    addr + size <= pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size);

	offset = addr - pdi->pi_bar[bidx].addr;

	if (dir == MEM_F_WRITE) {
		if (size == 8) {
			/* Split a 64-bit write into two 32-bit writes */
			(*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset,
			    4, *val & 0xffffffff);
			(*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset + 4,
			    4, *val >> 32);
		} else {
			(*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset,
			    size, *val);
		}
	} else {
		if (size == 8) {
			/* Assemble a 64-bit read from two 32-bit reads */
			*val = (*pe->pe_barread)(ctx, vcpu, pdi, bidx,
			    offset, 4);
			*val |= (*pe->pe_barread)(ctx, vcpu, pdi, bidx,
			    offset + 4, 4) << 32;
		} else {
			*val = (*pe->pe_barread)(ctx, vcpu, pdi, bidx,
			    offset, size);
		}
	}

	return (0);
}


/*
 * Carve 'size' bytes (a power of 2) out of the resource window rooted at
 * '*baseptr', aligning the allocation to 'size'.  On success the address
 * is returned in '*addr' and '*baseptr' is advanced past the allocation;
 * -1 is returned if the window would exceed 'limit'.
 */
static int
pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size,
    uint64_t *addr)
{
	uint64_t base;

	assert((size & (size - 1)) == 0);	/* must be a power of 2 */

	base = roundup2(*baseptr, size);

	if (base + size <= limit) {
		*addr = base;
		*baseptr = base + size;
		return (0);
	} else
		return (-1);
}

/*
 * Register (or unregister) the MMIO or I/O region associated with the BAR
 * register 'idx' of an emulated pci device.
 */
static void
modify_bar_registration(struct pci_devinst *pi, int idx, int registration)
{
	int error;
	struct inout_port iop;
	struct mem_range mr;

	switch (pi->pi_bar[idx].type) {
	case PCIBAR_IO:
		bzero(&iop, sizeof(struct inout_port));
		iop.name = pi->pi_name;
		iop.port = pi->pi_bar[idx].addr;
		iop.size = pi->pi_bar[idx].size;
		if (registration) {
			iop.flags = IOPORT_F_INOUT;
			iop.handler = pci_emul_io_handler;
			iop.arg = pi;
			error = register_inout(&iop);
		} else
			error = unregister_inout(&iop);
		break;
	case PCIBAR_MEM32:
	case PCIBAR_MEM64:
		bzero(&mr, sizeof(struct mem_range));
		mr.name = pi->pi_name;
		mr.base = pi->pi_bar[idx].addr;
		mr.size = pi->pi_bar[idx].size;
		if (registration) {
			mr.flags = MEM_F_RW;
			mr.handler = pci_emul_mem_handler;
			mr.arg1 = pi;
			/* BAR index is recovered from arg2 in the handler */
			mr.arg2 = idx;
			error = register_mem(&mr);
		} else
			error = unregister_mem(&mr);
		break;
	default:
		error = EINVAL;
		break;
	}
	/* Registration failures indicate an internal inconsistency */
	assert(error == 0);
}

static void
unregister_bar(struct pci_devinst *pi, int idx)
{

	modify_bar_registration(pi, idx, 0);
}

static void
register_bar(struct pci_devinst *pi, int idx)
{

	modify_bar_registration(pi, idx, 1);
}

/* Are we decoding i/o port accesses for the emulated pci device? */
static int
porten(struct pci_devinst *pi)
{
	uint16_t cmd;

	cmd = pci_get_cfgdata16(pi, PCIR_COMMAND);

	return (cmd & PCIM_CMD_PORTEN);
}

/* Are we decoding memory accesses for the emulated pci device? */
static int
memen(struct pci_devinst *pi)
{
	uint16_t cmd;

	cmd = pci_get_cfgdata16(pi, PCIR_COMMAND);

	return (cmd & PCIM_CMD_MEMEN);
}

/*
 * Update the MMIO or I/O address that is decoded by the BAR register.
 *
 * If the pci device has enabled the address space decoding then intercept
 * the address range decoded by the BAR register.
 */
static void
update_bar_address(struct pci_devinst *pi, uint64_t addr, int idx, int type)
{
	int decode;

	if (pi->pi_bar[idx].type == PCIBAR_IO)
		decode = porten(pi);
	else
		decode = memen(pi);

	/* Drop the old registration before changing the decoded address */
	if (decode)
		unregister_bar(pi, idx);

	switch (type) {
	case PCIBAR_IO:
	case PCIBAR_MEM32:
		pi->pi_bar[idx].addr = addr;
		break;
	case PCIBAR_MEM64:
		/* 'addr' carries the new low 32 bits of a 64-bit BAR */
		pi->pi_bar[idx].addr &= ~0xffffffffUL;
		pi->pi_bar[idx].addr |= addr;
		break;
	case PCIBAR_MEMHI64:
		/* 'addr' carries the new high 32 bits (pre-shifted) */
		pi->pi_bar[idx].addr &= 0xffffffff;
		pi->pi_bar[idx].addr |= addr;
		break;
	default:
		assert(0);
	}

	if (decode)
		register_bar(pi, idx);
}

/*
 * Allocate an address range for BAR 'idx' of device 'pdi', program the
 * BAR register(s) in config space and register the region for decoding.
 * Returns 0 on success or -1 if the relevant window is exhausted.
 */
int
pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type,
    uint64_t size)
{
	uint64_t *baseptr = NULL;
	uint64_t limit = 0, lobits = 0;
	uint64_t addr, mask, bar;
	uint16_t cmd, enbit;
	int error;

	assert(idx >= 0 && idx <= PCI_BARMAX);

	if ((size & (size - 1)) != 0)
		size = 1UL << flsl(size);	/* round up to a power of 2 */

	/* Enforce minimum BAR sizes required by the PCI standard */
	if (type == PCIBAR_IO) {
		if (size < 4)
			size = 4;
	} else {
		if (size < 16)
			size = 16;
	}

	switch (type) {
	case PCIBAR_NONE:
		baseptr = NULL;
		addr = mask = lobits = enbit = 0;
		break;
	case PCIBAR_IO:
		baseptr = &pci_emul_iobase;
		limit = PCI_EMUL_IOLIMIT;
		mask = PCIM_BAR_IO_BASE;
		lobits = PCIM_BAR_IO_SPACE;
		enbit = PCIM_CMD_PORTEN;
		break;
	case PCIBAR_MEM64:
		/*
		 * XXX
		 * Some drivers do not work well if the 64-bit BAR is allocated
		 * above 4GB. Allow for this by allocating small requests under
		 * 4GB unless the allocation size is larger than some arbitrary
		 * number (128MB currently).
		 */
		if (size > 128 * 1024 * 1024) {
			baseptr = &pci_emul_membase64;
			limit = PCI_EMUL_MEMLIMIT64;
			mask = PCIM_BAR_MEM_BASE;
			lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 |
				 PCIM_BAR_MEM_PREFETCH;
		} else {
			baseptr = &pci_emul_membase32;
			limit = PCI_EMUL_MEMLIMIT32;
			mask = PCIM_BAR_MEM_BASE;
			lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64;
		}
		enbit = PCIM_CMD_MEMEN;
		break;
	case PCIBAR_MEM32:
		baseptr = &pci_emul_membase32;
		limit = PCI_EMUL_MEMLIMIT32;
		mask = PCIM_BAR_MEM_BASE;
		lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32;
		enbit = PCIM_CMD_MEMEN;
		break;
	default:
		/* NOTE(review): message says "alloc_base"; function is alloc_bar */
		printf("pci_emul_alloc_base: invalid bar type %d\n", type);
/*
 * NOTE(review): the FreeBSD macro is normally spelled '__FreeBSD__', so
 * as written the abort() branch is always taken — confirm intent.
 */
#ifdef FreeBSD
		assert(0);
#else
		abort();
#endif
	}

	if (baseptr != NULL) {
		error = pci_emul_alloc_resource(baseptr, limit, size, &addr);
		if (error != 0)
			return (error);
	}

	pdi->pi_bar[idx].type = type;
	pdi->pi_bar[idx].addr = addr;
	pdi->pi_bar[idx].size = size;

	/* Initialize the BAR register in config space */
	bar = (addr & mask) | lobits;
	pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar);

	if (type == PCIBAR_MEM64) {
		/* A 64-bit BAR consumes the next BAR slot for the high bits */
		assert(idx + 1 <= PCI_BARMAX);
		pdi->pi_bar[idx + 1].type = PCIBAR_MEMHI64;
		pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32);
	}

	/* Enable the matching decode bit in the command register */
	cmd = pci_get_cfgdata16(pdi, PCIR_COMMAND);
	if ((cmd & enbit) != enbit)
		pci_set_cfgdata16(pdi, PCIR_COMMAND, cmd | enbit);
	register_bar(pdi, idx);

	return (0);
}

#define	CAP_START_OFFSET	0x40
/*
 * Append 'caplen' bytes of capability data to the device's capability
 * list, linking it after the previous capability.  Returns 0 on success
 * or -1 if config space is exhausted.
 */
static int
pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen)
{
	int i, capoff, reallen;
	uint16_t sts;

	assert(caplen > 0);

	reallen = roundup2(caplen, 4);		/* dword aligned */

	sts = pci_get_cfgdata16(pi, PCIR_STATUS);
	if ((sts & PCIM_STATUS_CAPPRESENT) == 0)
		capoff = CAP_START_OFFSET;
	else
		capoff = pi->pi_capend + 1;

708 /* Check if we have enough space */ 709 if (capoff + reallen > PCI_REGMAX + 1) 710 return (-1); 711 712 /* Set the previous capability pointer */ 713 if ((sts & PCIM_STATUS_CAPPRESENT) == 0) { 714 pci_set_cfgdata8(pi, PCIR_CAP_PTR, capoff); 715 pci_set_cfgdata16(pi, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT); 716 } else 717 pci_set_cfgdata8(pi, pi->pi_prevcap + 1, capoff); 718 719 /* Copy the capability */ 720 for (i = 0; i < caplen; i++) 721 pci_set_cfgdata8(pi, capoff + i, capdata[i]); 722 723 /* Set the next capability pointer */ 724 pci_set_cfgdata8(pi, capoff + 1, 0); 725 726 pi->pi_prevcap = capoff; 727 pi->pi_capend = capoff + reallen - 1; 728 return (0); 729 } 730 731 static struct pci_devemu * 732 pci_emul_finddev(char *name) 733 { 734 struct pci_devemu **pdpp, *pdp; 735 736 SET_FOREACH(pdpp, pci_devemu_set) { 737 pdp = *pdpp; 738 if (!strcmp(pdp->pe_emu, name)) { 739 return (pdp); 740 } 741 } 742 743 return (NULL); 744 } 745 746 static int 747 pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int bus, int slot, 748 int func, struct funcinfo *fi) 749 { 750 struct pci_devinst *pdi; 751 int err; 752 753 pdi = calloc(1, sizeof(struct pci_devinst)); 754 755 pdi->pi_vmctx = ctx; 756 pdi->pi_bus = bus; 757 pdi->pi_slot = slot; 758 pdi->pi_func = func; 759 pthread_mutex_init(&pdi->pi_lintr.lock, NULL); 760 pdi->pi_lintr.pin = 0; 761 pdi->pi_lintr.state = IDLE; 762 pdi->pi_lintr.pirq_pin = 0; 763 pdi->pi_lintr.ioapic_irq = 0; 764 pdi->pi_d = pde; 765 snprintf(pdi->pi_name, PI_NAMESZ, "%s-pci-%d", pde->pe_emu, slot); 766 767 /* Disable legacy interrupts */ 768 pci_set_cfgdata8(pdi, PCIR_INTLINE, 255); 769 pci_set_cfgdata8(pdi, PCIR_INTPIN, 0); 770 771 pci_set_cfgdata8(pdi, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN); 772 773 err = (*pde->pe_init)(ctx, pdi, fi->fi_param); 774 if (err == 0) 775 fi->fi_devi = pdi; 776 else 777 free(pdi); 778 779 return (err); 780 } 781 782 void 783 pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr) 784 { 785 int mmc; 
786 787 /* Number of msi messages must be a power of 2 between 1 and 32 */ 788 assert((msgnum & (msgnum - 1)) == 0 && msgnum >= 1 && msgnum <= 32); 789 mmc = ffs(msgnum) - 1; 790 791 bzero(msicap, sizeof(struct msicap)); 792 msicap->capid = PCIY_MSI; 793 msicap->nextptr = nextptr; 794 msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1); 795 } 796 797 int 798 pci_emul_add_msicap(struct pci_devinst *pi, int msgnum) 799 { 800 struct msicap msicap; 801 802 pci_populate_msicap(&msicap, msgnum, 0); 803 804 return (pci_emul_add_capability(pi, (u_char *)&msicap, sizeof(msicap))); 805 } 806 807 static void 808 pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum, 809 uint32_t msix_tab_size) 810 { 811 812 assert(msix_tab_size % 4096 == 0); 813 814 bzero(msixcap, sizeof(struct msixcap)); 815 msixcap->capid = PCIY_MSIX; 816 817 /* 818 * Message Control Register, all fields set to 819 * zero except for the Table Size. 820 * Note: Table size N is encoded as N-1 821 */ 822 msixcap->msgctrl = msgnum - 1; 823 824 /* 825 * MSI-X BAR setup: 826 * - MSI-X table start at offset 0 827 * - PBA table starts at a 4K aligned offset after the MSI-X table 828 */ 829 msixcap->table_info = barnum & PCIM_MSIX_BIR_MASK; 830 msixcap->pba_info = msix_tab_size | (barnum & PCIM_MSIX_BIR_MASK); 831 } 832 833 static void 834 pci_msix_table_init(struct pci_devinst *pi, int table_entries) 835 { 836 int i, table_size; 837 838 assert(table_entries > 0); 839 assert(table_entries <= MAX_MSIX_TABLE_ENTRIES); 840 841 table_size = table_entries * MSIX_TABLE_ENTRY_SIZE; 842 pi->pi_msix.table = calloc(1, table_size); 843 844 /* set mask bit of vector control register */ 845 for (i = 0; i < table_entries; i++) 846 pi->pi_msix.table[i].vector_control |= PCIM_MSIX_VCTRL_MASK; 847 } 848 849 int 850 pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum) 851 { 852 uint32_t tab_size; 853 struct msixcap msixcap; 854 855 assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES); 856 assert(barnum 
>= 0 && barnum <= PCIR_MAX_BAR_0); 857 858 tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE; 859 860 /* Align table size to nearest 4K */ 861 tab_size = roundup2(tab_size, 4096); 862 863 pi->pi_msix.table_bar = barnum; 864 pi->pi_msix.pba_bar = barnum; 865 pi->pi_msix.table_offset = 0; 866 pi->pi_msix.table_count = msgnum; 867 pi->pi_msix.pba_offset = tab_size; 868 pi->pi_msix.pba_size = PBA_SIZE(msgnum); 869 870 pci_msix_table_init(pi, msgnum); 871 872 pci_populate_msixcap(&msixcap, msgnum, barnum, tab_size); 873 874 /* allocate memory for MSI-X Table and PBA */ 875 pci_emul_alloc_bar(pi, barnum, PCIBAR_MEM32, 876 tab_size + pi->pi_msix.pba_size); 877 878 return (pci_emul_add_capability(pi, (u_char *)&msixcap, 879 sizeof(msixcap))); 880 } 881 882 static void 883 msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 884 int bytes, uint32_t val) 885 { 886 uint16_t msgctrl, rwmask; 887 int off; 888 889 off = offset - capoff; 890 /* Message Control Register */ 891 if (off == 2 && bytes == 2) { 892 rwmask = PCIM_MSIXCTRL_MSIX_ENABLE | PCIM_MSIXCTRL_FUNCTION_MASK; 893 msgctrl = pci_get_cfgdata16(pi, offset); 894 msgctrl &= ~rwmask; 895 msgctrl |= val & rwmask; 896 val = msgctrl; 897 898 pi->pi_msix.enabled = val & PCIM_MSIXCTRL_MSIX_ENABLE; 899 pi->pi_msix.function_mask = val & PCIM_MSIXCTRL_FUNCTION_MASK; 900 pci_lintr_update(pi); 901 } 902 903 CFGWRITE(pi, offset, val, bytes); 904 } 905 906 static void 907 msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 908 int bytes, uint32_t val) 909 { 910 uint16_t msgctrl, rwmask, msgdata, mme; 911 uint32_t addrlo; 912 913 /* 914 * If guest is writing to the message control register make sure 915 * we do not overwrite read-only fields. 
916 */ 917 if ((offset - capoff) == 2 && bytes == 2) { 918 rwmask = PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE; 919 msgctrl = pci_get_cfgdata16(pi, offset); 920 msgctrl &= ~rwmask; 921 msgctrl |= val & rwmask; 922 val = msgctrl; 923 } 924 CFGWRITE(pi, offset, val, bytes); 925 926 msgctrl = pci_get_cfgdata16(pi, capoff + 2); 927 addrlo = pci_get_cfgdata32(pi, capoff + 4); 928 if (msgctrl & PCIM_MSICTRL_64BIT) 929 msgdata = pci_get_cfgdata16(pi, capoff + 12); 930 else 931 msgdata = pci_get_cfgdata16(pi, capoff + 8); 932 933 mme = msgctrl & PCIM_MSICTRL_MME_MASK; 934 pi->pi_msi.enabled = msgctrl & PCIM_MSICTRL_MSI_ENABLE ? 1 : 0; 935 if (pi->pi_msi.enabled) { 936 pi->pi_msi.addr = addrlo; 937 pi->pi_msi.msg_data = msgdata; 938 pi->pi_msi.maxmsgnum = 1 << (mme >> 4); 939 } else { 940 pi->pi_msi.maxmsgnum = 0; 941 } 942 pci_lintr_update(pi); 943 } 944 945 void 946 pciecap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 947 int bytes, uint32_t val) 948 { 949 950 /* XXX don't write to the readonly parts */ 951 CFGWRITE(pi, offset, val, bytes); 952 } 953 954 #define PCIECAP_VERSION 0x2 955 int 956 pci_emul_add_pciecap(struct pci_devinst *pi, int type) 957 { 958 int err; 959 struct pciecap pciecap; 960 961 bzero(&pciecap, sizeof(pciecap)); 962 963 /* 964 * Use the integrated endpoint type for endpoints on a root complex bus. 965 * 966 * NB: bhyve currently only supports a single PCI bus that is the root 967 * complex bus, so all endpoints are integrated. 
968 */ 969 if ((type == PCIEM_TYPE_ENDPOINT) && (pi->pi_bus == 0)) 970 type = PCIEM_TYPE_ROOT_INT_EP; 971 972 pciecap.capid = PCIY_EXPRESS; 973 pciecap.pcie_capabilities = PCIECAP_VERSION | type; 974 if (type != PCIEM_TYPE_ROOT_INT_EP) { 975 pciecap.link_capabilities = 0x411; /* gen1, x1 */ 976 pciecap.link_status = 0x11; /* gen1, x1 */ 977 } 978 979 err = pci_emul_add_capability(pi, (u_char *)&pciecap, sizeof(pciecap)); 980 return (err); 981 } 982 983 /* 984 * This function assumes that 'coff' is in the capabilities region of the 985 * config space. A capoff parameter of zero will force a search for the 986 * offset and type. 987 */ 988 void 989 pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes, uint32_t val, 990 uint8_t capoff, int capid) 991 { 992 uint8_t nextoff; 993 994 /* Do not allow un-aligned writes */ 995 if ((offset & (bytes - 1)) != 0) 996 return; 997 998 if (capoff == 0) { 999 /* Find the capability that we want to update */ 1000 capoff = CAP_START_OFFSET; 1001 while (1) { 1002 nextoff = pci_get_cfgdata8(pi, capoff + 1); 1003 if (nextoff == 0) 1004 break; 1005 if (offset >= capoff && offset < nextoff) 1006 break; 1007 1008 capoff = nextoff; 1009 } 1010 assert(offset >= capoff); 1011 capid = pci_get_cfgdata8(pi, capoff); 1012 } 1013 1014 /* 1015 * Capability ID and Next Capability Pointer are readonly. 1016 * However, some o/s's do 4-byte writes that include these. 1017 * For this case, trim the write back to 2 bytes and adjust 1018 * the data. 
1019 */ 1020 if (offset == capoff || offset == capoff + 1) { 1021 if (offset == capoff && bytes == 4) { 1022 bytes = 2; 1023 offset += 2; 1024 val >>= 16; 1025 } else 1026 return; 1027 } 1028 1029 switch (capid) { 1030 case PCIY_MSI: 1031 msicap_cfgwrite(pi, capoff, offset, bytes, val); 1032 break; 1033 case PCIY_MSIX: 1034 msixcap_cfgwrite(pi, capoff, offset, bytes, val); 1035 break; 1036 case PCIY_EXPRESS: 1037 pciecap_cfgwrite(pi, capoff, offset, bytes, val); 1038 break; 1039 default: 1040 break; 1041 } 1042 } 1043 1044 static int 1045 pci_emul_iscap(struct pci_devinst *pi, int offset) 1046 { 1047 uint16_t sts; 1048 1049 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 1050 if ((sts & PCIM_STATUS_CAPPRESENT) != 0) { 1051 if (offset >= CAP_START_OFFSET && offset <= pi->pi_capend) 1052 return (1); 1053 } 1054 return (0); 1055 } 1056 1057 static int 1058 pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, 1059 int size, uint64_t *val, void *arg1, long arg2) 1060 { 1061 /* 1062 * Ignore writes; return 0xff's for reads. The mem read code 1063 * will take care of truncating to the correct size. 
1064 */ 1065 if (dir == MEM_F_READ) { 1066 *val = 0xffffffffffffffff; 1067 } 1068 1069 return (0); 1070 } 1071 1072 static int 1073 pci_emul_ecfg_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, 1074 int bytes, uint64_t *val, void *arg1, long arg2) 1075 { 1076 int bus, slot, func, coff, in; 1077 1078 coff = addr & 0xfff; 1079 func = (addr >> 12) & 0x7; 1080 slot = (addr >> 15) & 0x1f; 1081 bus = (addr >> 20) & 0xff; 1082 in = (dir == MEM_F_READ); 1083 if (in) 1084 *val = ~0UL; 1085 pci_cfgrw(ctx, vcpu, in, bus, slot, func, coff, bytes, (uint32_t *)val); 1086 return (0); 1087 } 1088 1089 uint64_t 1090 pci_ecfg_base(void) 1091 { 1092 1093 return (PCI_EMUL_ECFG_BASE); 1094 } 1095 1096 #define BUSIO_ROUNDUP 32 1097 #define BUSMEM_ROUNDUP (1024 * 1024) 1098 1099 int 1100 init_pci(struct vmctx *ctx) 1101 { 1102 struct mem_range mr; 1103 struct pci_devemu *pde; 1104 struct businfo *bi; 1105 struct slotinfo *si; 1106 struct funcinfo *fi; 1107 size_t lowmem; 1108 int bus, slot, func; 1109 int error; 1110 1111 pci_emul_iobase = PCI_EMUL_IOBASE; 1112 pci_emul_membase32 = vm_get_lowmem_limit(ctx); 1113 pci_emul_membase64 = PCI_EMUL_MEMBASE64; 1114 1115 for (bus = 0; bus < MAXBUSES; bus++) { 1116 if ((bi = pci_businfo[bus]) == NULL) 1117 continue; 1118 /* 1119 * Keep track of the i/o and memory resources allocated to 1120 * this bus. 
 */
		bi->iobase = pci_emul_iobase;
		bi->membase32 = pci_emul_membase32;
		bi->membase64 = pci_emul_membase64;

		/*
		 * Instantiate the backend for every function configured on
		 * this bus; fi_name was set earlier when the command line
		 * was parsed.
		 */
		for (slot = 0; slot < MAXSLOTS; slot++) {
			si = &bi->slotinfo[slot];
			for (func = 0; func < MAXFUNCS; func++) {
				fi = &si->si_funcs[func];
				if (fi->fi_name == NULL)
					continue;
				pde = pci_emul_finddev(fi->fi_name);
				assert(pde != NULL);
				error = pci_emul_init(ctx, pde, bus, slot,
				    func, fi);
				if (error)
					return (error);
			}
		}

		/*
		 * Add some slop to the I/O and memory resources decoded by
		 * this bus to give a guest some flexibility if it wants to
		 * reprogram the BARs.
		 */
		pci_emul_iobase += BUSIO_ROUNDUP;
		pci_emul_iobase = roundup2(pci_emul_iobase, BUSIO_ROUNDUP);
		bi->iolimit = pci_emul_iobase;

		pci_emul_membase32 += BUSMEM_ROUNDUP;
		pci_emul_membase32 = roundup2(pci_emul_membase32,
		    BUSMEM_ROUNDUP);
		bi->memlimit32 = pci_emul_membase32;

		pci_emul_membase64 += BUSMEM_ROUNDUP;
		pci_emul_membase64 = roundup2(pci_emul_membase64,
		    BUSMEM_ROUNDUP);
		bi->memlimit64 = pci_emul_membase64;
	}

	/*
	 * PCI backends are initialized before routing INTx interrupts
	 * so that LPC devices are able to reserve ISA IRQs before
	 * routing PIRQ pins.
	 */
	for (bus = 0; bus < MAXBUSES; bus++) {
		if ((bi = pci_businfo[bus]) == NULL)
			continue;

		for (slot = 0; slot < MAXSLOTS; slot++) {
			si = &bi->slotinfo[slot];
			for (func = 0; func < MAXFUNCS; func++) {
				fi = &si->si_funcs[func];
				if (fi->fi_devi == NULL)
					continue;
				pci_lintr_route(fi->fi_devi);
			}
		}
	}
	/* All PIRQ pins are assigned now; let the LPC emulation know. */
	lpc_pirq_routed();

	/*
	 * The guest physical memory map looks like the following:
	 * [0,            lowmem)        guest system memory
	 * [lowmem,       lowmem_limit)  memory hole (may be absent)
	 * [lowmem_limit, 0xE0000000)    PCI hole (32-bit BAR allocation)
	 * [0xE0000000,   0xF0000000)    PCI extended config window
	 * [0xF0000000,   4GB)           LAPIC, IOAPIC, HPET, firmware
	 * [4GB,          4GB + highmem)
	 */

	/*
	 * Accesses to memory addresses that are not allocated to system
	 * memory or PCI devices return 0xff's.
	 */
	lowmem = vm_get_lowmem_size(ctx);
	bzero(&mr, sizeof(struct mem_range));
	mr.name = "PCI hole";
	mr.flags = MEM_F_RW | MEM_F_IMMUTABLE;
	mr.base = lowmem;
	mr.size = (4ULL * 1024 * 1024 * 1024) - lowmem;
	mr.handler = pci_emul_fallback_handler;
	error = register_mem_fallback(&mr);
	assert(error == 0);

	/* PCI extended config space */
	bzero(&mr, sizeof(struct mem_range));
	mr.name = "PCI ECFG";
	mr.flags = MEM_F_RW | MEM_F_IMMUTABLE;
	mr.base = PCI_EMUL_ECFG_BASE;
	mr.size = PCI_EMUL_ECFG_SIZE;
	mr.handler = pci_emul_ecfg_handler;
	error = register_mem(&mr);
	assert(error == 0);

	return (0);
}

/*
 * Emit one _PRT package for APIC mode: the slot/pin maps directly to an
 * I/O APIC input, so the source field is Zero and the source index is
 * the global interrupt number.
 */
static void
pci_apic_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq,
    void *arg)
{

	dsdt_line(" Package ()");
	dsdt_line(" {");
	dsdt_line(" 0x%X,", slot << 16 | 0xffff);
	dsdt_line(" 0x%02X,", pin - 1);
	dsdt_line(" Zero,");
	dsdt_line(" 0x%X", ioapic_irq);
	dsdt_line(" },");
}

/*
 * Emit one _PRT package for 8259 (PIC) mode: the slot/pin is routed
 * through a PIRQ link device named by the LPC emulation.  Slots whose
 * pirq_pin has no link device are skipped.
 */
static void
		 */
		if (bus != 0)
			return;
	}

	dsdt_line(" Device (PC%02X)", bus);
	dsdt_line(" {");
	dsdt_line(" Name (_HID, EisaId (\"PNP0A03\"))");

	dsdt_line(" Method (_BBN, 0, NotSerialized)");
	dsdt_line(" {");
	dsdt_line(" Return (0x%08X)", bus);
	dsdt_line(" }");
	dsdt_line(" Name (_CRS, ResourceTemplate ()");
	dsdt_line(" {");
	/* This root port decodes exactly one bus number: 'bus'. */
	dsdt_line(" WordBusNumber (ResourceProducer, MinFixed, "
	    "MaxFixed, PosDecode,");
	dsdt_line(" 0x0000, // Granularity");
	dsdt_line(" 0x%04X, // Range Minimum", bus);
	dsdt_line(" 0x%04X, // Range Maximum", bus);
	dsdt_line(" 0x0000, // Translation Offset");
	dsdt_line(" 0x0001, // Length");
	dsdt_line(" ,, )");

	if (bus == 0) {
		/* Config mechanism #1 ports (0xCF8-0xCFF) live on bus 0. */
		dsdt_indent(3);
		dsdt_fixed_ioport(0xCF8, 8);
		dsdt_unindent(3);

		/* I/O ports below the config window ... */
		dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, "
		    "PosDecode, EntireRange,");
		dsdt_line(" 0x0000, // Granularity");
		dsdt_line(" 0x0000, // Range Minimum");
		dsdt_line(" 0x0CF7, // Range Maximum");
		dsdt_line(" 0x0000, // Translation Offset");
		dsdt_line(" 0x0CF8, // Length");
		dsdt_line(" ,, , TypeStatic)");

		/* ... and above it, up to where BAR allocation begins. */
		dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, "
		    "PosDecode, EntireRange,");
		dsdt_line(" 0x0000, // Granularity");
		dsdt_line(" 0x0D00, // Range Minimum");
		dsdt_line(" 0x%04X, // Range Maximum",
		    PCI_EMUL_IOBASE - 1);
		dsdt_line(" 0x0000, // Translation Offset");
		dsdt_line(" 0x%04X, // Length",
		    PCI_EMUL_IOBASE - 0x0D00);
		dsdt_line(" ,, , TypeStatic)");

		if (bi == NULL) {
			/* Empty bus 0: close _CRS and skip the windows. */
			dsdt_line(" })");
			goto done;
		}
	}
	assert(bi != NULL);

	/* i/o window */
	dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, "
	    "PosDecode, EntireRange,");
	dsdt_line(" 0x0000, // Granularity");
	dsdt_line(" 0x%04X, // Range Minimum", bi->iobase);
	dsdt_line(" 0x%04X, // Range Maximum",
	    bi->iolimit - 1);
	dsdt_line(" 0x0000, // Translation Offset");
	dsdt_line(" 0x%04X, // Length",
	    bi->iolimit - bi->iobase);
	dsdt_line(" ,, , TypeStatic)");

	/* mmio window (32-bit) */
	dsdt_line(" DWordMemory (ResourceProducer, PosDecode, "
	    "MinFixed, MaxFixed, NonCacheable, ReadWrite,");
	dsdt_line(" 0x00000000, // Granularity");
	dsdt_line(" 0x%08X, // Range Minimum\n", bi->membase32);
	dsdt_line(" 0x%08X, // Range Maximum\n",
	    bi->memlimit32 - 1);
	dsdt_line(" 0x00000000, // Translation Offset");
	dsdt_line(" 0x%08X, // Length\n",
	    bi->memlimit32 - bi->membase32);
	dsdt_line(" ,, , AddressRangeMemory, TypeStatic)");

	/* mmio window (64-bit) */
	dsdt_line(" QWordMemory (ResourceProducer, PosDecode, "
	    "MinFixed, MaxFixed, NonCacheable, ReadWrite,");
	dsdt_line(" 0x0000000000000000, // Granularity");
	dsdt_line(" 0x%016lX, // Range Minimum\n", bi->membase64);
	dsdt_line(" 0x%016lX, // Range Maximum\n",
	    bi->memlimit64 - 1);
	dsdt_line(" 0x0000000000000000, // Translation Offset");
	dsdt_line(" 0x%016lX, // Length\n",
	    bi->memlimit64 - bi->membase64);
	dsdt_line(" ,, , AddressRangeMemory, TypeStatic)");
	dsdt_line(" })");

	/*
	 * Emit both interrupt routing tables (PIC and APIC mode) and a
	 * _PRT method that selects between them based on PICM, which is
	 * set by the guest via _PIC (see pci_write_dsdt).
	 */
	count = pci_count_lintr(bus);
	if (count != 0) {
		dsdt_indent(2);
		dsdt_line("Name (PPRT, Package ()");
		dsdt_line("{");
		pci_walk_lintr(bus, pci_pirq_prt_entry, NULL);
		dsdt_line("})");
		dsdt_line("Name (APRT, Package ()");
		dsdt_line("{");
		pci_walk_lintr(bus, pci_apic_prt_entry, NULL);
		dsdt_line("})");
		dsdt_line("Method (_PRT, 0, NotSerialized)");
		dsdt_line("{");
		dsdt_line(" If (PICM)");
		dsdt_line(" {");
		dsdt_line(" Return (APRT)");
		dsdt_line(" }");
		dsdt_line(" Else");
		dsdt_line(" {");
		dsdt_line(" Return (PPRT)");
		dsdt_line(" }");
		dsdt_line("}");
		dsdt_unindent(2);
	}

	/* Let each device emulation append its own DSDT content. */
	dsdt_indent(2);
	for (slot = 0; slot < MAXSLOTS; slot++) {
		si = &bi->slotinfo[slot];
		for (func = 0; func < MAXFUNCS; func++) {
			pi = si->si_funcs[func].fi_devi;
			if (pi != NULL && pi->pi_d->pe_write_dsdt != NULL)
				pi->pi_d->pe_write_dsdt(pi);
		}
	}
	dsdt_unindent(2);
done:
	dsdt_line(" }");
}

/*
 * Emit the top-level PCI portion of the DSDT: the PICM flag, the _PIC
 * method that records the guest's interrupt mode, and one PC%02X device
 * per bus under \_SB.
 */
void
pci_write_dsdt(void)
{
	int bus;

	dsdt_indent(1);
	dsdt_line("Name (PICM, 0x00)");
	dsdt_line("Method (_PIC, 1, NotSerialized)");
	dsdt_line("{");
	dsdt_line(" Store (Arg0, PICM)");
	dsdt_line("}");
	dsdt_line("");
	dsdt_line("Scope (_SB)");
	dsdt_line("{");
	for (bus = 0; bus < MAXBUSES; bus++)
		pci_bus_write_dsdt(bus);
	dsdt_line("}");
	dsdt_unindent(1);
}

/* Return non-zero if at least one device was configured on 'bus'. */
int
pci_bus_configured(int bus)
{
	assert(bus >= 0 && bus < MAXBUSES);
	return (pci_businfo[bus] != NULL);
}

/* Return non-zero if the guest has enabled MSI for this device. */
int
pci_msi_enabled(struct pci_devinst *pi)
{
	return (pi->pi_msi.enabled);
}

/*
 * Return the number of MSI messages the guest has programmed, or 0 if
 * MSI is disabled.
 */
int
pci_msi_maxmsgnum(struct pci_devinst *pi)
{
	if (pi->pi_msi.enabled)
		return (pi->pi_msi.maxmsgnum);
	else
		return (0);
}

/* MSI-X is only considered active when MSI is not also enabled. */
int
pci_msix_enabled(struct pci_devinst *pi)
{

	return (pi->pi_msix.enabled && !pi->pi_msi.enabled);
}

/*
 * Deliver MSI-X message 'index' to the guest if MSI-X is enabled and
 * neither the function nor the individual vector is masked.
 */
void
pci_generate_msix(struct pci_devinst *pi, int index)
{
	struct msix_table_entry *mte;

	if (!pci_msix_enabled(pi))
		return;

	if (pi->pi_msix.function_mask)
		return;

	if (index >= pi->pi_msix.table_count)
		return;

	mte = &pi->pi_msix.table[index];
	if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
		/* XXX Set PBA bit if interrupt is disabled */
		vm_lapic_msi(pi->pi_vmctx, mte->addr, mte->msg_data);
	}
}

/*
 * Deliver MSI message 'index' to the guest; the message data is the
 * programmed base value plus the index.
 */
void
pci_generate_msi(struct pci_devinst *pi, int index)
{

	if (pci_msi_enabled(pi) && index < pci_msi_maxmsgnum(pi)) {
		vm_lapic_msi(pi->pi_vmctx, pi->pi_msi.addr,
		    pi->pi_msi.msg_data + index);
	}
}

/*
 * INTx may only be asserted while MSI and MSI-X are both disabled and
 * the command register does not have INTx masked (PCIM_CMD_INTxDIS).
 */
static bool
pci_lintr_permitted(struct pci_devinst *pi)
{
	uint16_t cmd;

	cmd = pci_get_cfgdata16(pi, PCIR_COMMAND);
	return (!(pi->pi_msi.enabled || pi->pi_msix.enabled ||
	    (cmd & PCIM_CMD_INTxDIS)));
}

/*
 * Reserve a legacy interrupt pin (INTA#..INTD#) for 'pi', choosing the
 * least-used pin on its slot to spread devices across pins.
 */
void
pci_lintr_request(struct pci_devinst *pi)
{
	struct businfo *bi;
	struct slotinfo *si;
	int bestpin, bestcount, pin;

	bi = pci_businfo[pi->pi_bus];
	assert(bi != NULL);

	/*
	 * Just allocate a pin from our slot. The pin will be
	 * assigned IRQs later when interrupts are routed.
	 */
	si = &bi->slotinfo[pi->pi_slot];
	bestpin = 0;
	bestcount = si->si_intpins[0].ii_count;
	for (pin = 1; pin < 4; pin++) {
		if (si->si_intpins[pin].ii_count < bestcount) {
			bestpin = pin;
			bestcount = si->si_intpins[pin].ii_count;
		}
	}

	si->si_intpins[bestpin].ii_count++;
	/* Config space encodes pins 1-based (1 = INTA#). */
	pi->pi_lintr.pin = bestpin + 1;
	pci_set_cfgdata8(pi, PCIR_INTPIN, bestpin + 1);
}

/*
 * Bind the pin reserved by pci_lintr_request() to an I/O APIC input and
 * a PIRQ router pin, and publish the PIRQ's IRQ in PCIR_INTLINE.  All
 * functions sharing a slot intpin share the same routing.
 */
static void
pci_lintr_route(struct pci_devinst *pi)
{
	struct businfo *bi;
	struct intxinfo *ii;

	if (pi->pi_lintr.pin == 0)
		return;

	bi = pci_businfo[pi->pi_bus];
	assert(bi != NULL);
	ii = &bi->slotinfo[pi->pi_slot].si_intpins[pi->pi_lintr.pin - 1];

	/*
	 * Attempt to allocate an I/O APIC pin for this intpin if one
	 * is not yet assigned.
	 */
	if (ii->ii_ioapic_irq == 0)
		ii->ii_ioapic_irq = ioapic_pci_alloc_irq(pi);
	assert(ii->ii_ioapic_irq > 0);

	/*
	 * Attempt to allocate a PIRQ pin for this intpin if one is
	 * not yet assigned.
	 */
	if (ii->ii_pirq_pin == 0)
		ii->ii_pirq_pin = pirq_alloc_pin(pi);
	assert(ii->ii_pirq_pin > 0);

	pi->pi_lintr.ioapic_irq = ii->ii_ioapic_irq;
	pi->pi_lintr.pirq_pin = ii->ii_pirq_pin;
	pci_set_cfgdata8(pi, PCIR_INTLINE, pirq_irq(ii->ii_pirq_pin));
}

/*
 * Assert the device's INTx line.  If INTx is currently masked (by MSI,
 * MSI-X or the command register) the assertion is remembered as PENDING
 * and replayed by pci_lintr_update() when it becomes permitted.
 */
void
pci_lintr_assert(struct pci_devinst *pi)
{

	assert(pi->pi_lintr.pin > 0);

	pthread_mutex_lock(&pi->pi_lintr.lock);
	if (pi->pi_lintr.state == IDLE) {
		if (pci_lintr_permitted(pi)) {
			pi->pi_lintr.state = ASSERTED;
			pci_irq_assert(pi);
		} else
			pi->pi_lintr.state = PENDING;
	}
	pthread_mutex_unlock(&pi->pi_lintr.lock);
}

/*
 * Deassert the device's INTx line; a PENDING (never delivered)
 * assertion is simply dropped.
 */
void
pci_lintr_deassert(struct pci_devinst *pi)
{

	assert(pi->pi_lintr.pin > 0);

	pthread_mutex_lock(&pi->pi_lintr.lock);
	if (pi->pi_lintr.state == ASSERTED) {
		pi->pi_lintr.state = IDLE;
		pci_irq_deassert(pi);
	} else if (pi->pi_lintr.state == PENDING)
		pi->pi_lintr.state = IDLE;
	pthread_mutex_unlock(&pi->pi_lintr.lock);
}

/*
 * Re-evaluate the INTx state after a change that may have masked or
 * unmasked it (command register write, MSI/MSI-X enable/disable).
 */
static void
pci_lintr_update(struct pci_devinst *pi)
{

	pthread_mutex_lock(&pi->pi_lintr.lock);
	if (pi->pi_lintr.state == ASSERTED && !pci_lintr_permitted(pi)) {
		pci_irq_deassert(pi);
		pi->pi_lintr.state = PENDING;
	} else if (pi->pi_lintr.state == PENDING && pci_lintr_permitted(pi)) {
		pi->pi_lintr.state = ASSERTED;
		pci_irq_assert(pi);
	}
	pthread_mutex_unlock(&pi->pi_lintr.lock);
#ifndef __FreeBSD__
	/* illumos addition: notify the emulation of INTx routing updates. */
	if (pi->pi_d->pe_lintrupdate != NULL) {
		pi->pi_d->pe_lintrupdate(pi);
	}
#endif /* __FreeBSD__ */
}

/* Count the slot intpins on 'bus' that have at least one user. */
int
pci_count_lintr(int bus)
{
	int count, slot, pin;
	struct slotinfo *slotinfo;

	count = 0;
	if (pci_businfo[bus] != NULL) {
		for (slot = 0; slot < MAXSLOTS; slot++) {
			slotinfo = &pci_businfo[bus]->slotinfo[slot];
			for (pin = 0; pin < 4; pin++) {
				if (slotinfo->si_intpins[pin].ii_count != 0)
					count++;
			}
		}
	}
	return (count);
}

/*
 * Invoke 'cb' for every in-use slot intpin on 'bus'.  The callback
 * receives the 1-based pin number plus its PIRQ and I/O APIC routing.
 */
void
pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg)
{
	struct businfo *bi;
	struct slotinfo *si;
	struct intxinfo *ii;
	int slot, pin;

	if ((bi = pci_businfo[bus]) == NULL)
		return;

	for (slot = 0; slot < MAXSLOTS; slot++) {
		si = &bi->slotinfo[slot];
		for (pin = 0; pin < 4; pin++) {
			ii = &si->si_intpins[pin];
			if (ii->ii_count != 0)
				cb(bus, slot, pin + 1, ii->ii_pirq_pin,
				    ii->ii_ioapic_irq, arg);
		}
	}
}

/*
 * Return 1 if the emulated device in 'slot' is a multi-function device.
 * Return 0 otherwise.
 */
static int
pci_emul_is_mfdev(int bus, int slot)
{
	struct businfo *bi;
	struct slotinfo *si;
	int f, numfuncs;

	numfuncs = 0;
	if ((bi = pci_businfo[bus]) != NULL) {
		si = &bi->slotinfo[slot];
		for (f = 0; f < MAXFUNCS; f++) {
			if (si->si_funcs[f].fi_devi != NULL) {
				numfuncs++;
			}
		}
	}
	return (numfuncs > 1);
}

/*
 * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on
 * whether or not is a multi-function being emulated in the pci 'slot'.
 */
static void
pci_emul_hdrtype_fixup(int bus, int slot, int off, int bytes, uint32_t *rv)
{
	int mfdev;

	/* Only adjust reads whose span covers the header-type byte. */
	if (off <= PCIR_HDRTYPE && off + bytes > PCIR_HDRTYPE) {
		mfdev = pci_emul_is_mfdev(bus, slot);
		switch (bytes) {
		case 1:
		case 2:
			*rv &= ~PCIM_MFDEV;
			if (mfdev) {
				*rv |= PCIM_MFDEV;
			}
			break;
		case 4:
			/* 4-byte read at off 0xc: HDRTYPE is in bits 16-23. */
			*rv &= ~(PCIM_MFDEV << 16);
			if (mfdev) {
				*rv |= (PCIM_MFDEV << 16);
			}
			break;
		}
	}
}

/*
 * Update device state in response to changes to the PCI command
 * register.
 */
void
pci_emul_cmd_changed(struct pci_devinst *pi, uint16_t old)
{
	int i;
	uint16_t changed, new;

	new = pci_get_cfgdata16(pi, PCIR_COMMAND);
	changed = old ^ new;

	/*
	 * If the MMIO or I/O address space decoding has changed then
	 * register/unregister all BARs that decode that address space.
	 */
	for (i = 0; i <= PCI_BARMAX; i++) {
		switch (pi->pi_bar[i].type) {
		case PCIBAR_NONE:
		case PCIBAR_MEMHI64:
			/* MEMHI64 is handled with its MEM64 lower half. */
			break;
		case PCIBAR_IO:
			/* I/O address space decoding changed? */
			if (changed & PCIM_CMD_PORTEN) {
				if (new & PCIM_CMD_PORTEN)
					register_bar(pi, i);
				else
					unregister_bar(pi, i);
			}
			break;
		case PCIBAR_MEM32:
		case PCIBAR_MEM64:
			/* MMIO address space decoding changed? */
			if (changed & PCIM_CMD_MEMEN) {
				if (new & PCIM_CMD_MEMEN)
					register_bar(pi, i);
				else
					unregister_bar(pi, i);
			}
			break;
		default:
			assert(0);
		}
	}

	/*
	 * If INTx has been unmasked and is pending, assert the
	 * interrupt.
	 */
	pci_lintr_update(pi);
}

/*
 * Handle a guest write that overlaps the command/status registers:
 * filter out read-only bits, commit the write, then propagate any
 * command-register side effects.
 */
static void
pci_emul_cmdsts_write(struct pci_devinst *pi, int coff, uint32_t new, int bytes)
{
	int rshift;
	uint32_t cmd, old, readonly;

	cmd = pci_get_cfgdata16(pi, PCIR_COMMAND);	/* stash old value */

	/*
	 * From PCI Local Bus Specification 3.0 sections 6.2.2 and 6.2.3.
	 *
	 * XXX Bits 8, 11, 12, 13, 14 and 15 in the status register are
	 * 'write 1 to clear'. However these bits are not set to '1' by
	 * any device emulation so it is simpler to treat them as readonly.
	 */
	rshift = (coff & 0x3) * 8;
	readonly = 0xFFFFF880 >> rshift;

	old = CFGREAD(pi, coff, bytes);
	new &= ~readonly;
	new |= (old & readonly);
	CFGWRITE(pi, coff, new, bytes);		/* update config */

	pci_emul_cmd_changed(pi, cmd);
}

/*
 * Core config-space access dispatcher for bus/slot/func at offset
 * 'coff'.  Reads ('in' != 0) fill *eax; writes consume it.  Handles
 * device overrides, BAR reprogramming, capability and command/status
 * writes, falling back to raw config storage otherwise.
 */
static void
pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func,
    int coff, int bytes, uint32_t *eax)
{
	struct businfo *bi;
	struct slotinfo *si;
	struct pci_devinst *pi;
	struct pci_devemu *pe;
	int idx, needcfg;
	uint64_t addr, mask;
	uint64_t bar = 0;

	if ((bi = pci_businfo[bus]) != NULL) {
		si = &bi->slotinfo[slot];
		pi = si->si_funcs[func].fi_devi;
	} else
		pi = NULL;

	/*
	 * Just return if there is no device at this slot:func or if the
	 * the guest is doing an un-aligned access.
	 */
	if (pi == NULL || (bytes != 1 && bytes != 2 && bytes != 4) ||
	    (coff & (bytes - 1)) != 0) {
		if (in)
			*eax = 0xffffffff;
		return;
	}

	/*
	 * Ignore all writes beyond the standard config space and return all
	 * ones on reads.
	 */
	if (coff >= PCI_REGMAX + 1) {
		if (in) {
			*eax = 0xffffffff;
			/*
			 * Extended capabilities begin at offset 256 in config
			 * space. Absence of extended capabilities is signaled
			 * with all 0s in the extended capability header at
			 * offset 256.
			 */
			if (coff <= PCI_REGMAX + 4)
				*eax = 0x00000000;
		}
		return;
	}

	pe = pi->pi_d;

	/*
	 * Config read
	 */
	if (in) {
		/* Let the device emulation override the default handler */
		if (pe->pe_cfgread != NULL) {
			needcfg = pe->pe_cfgread(ctx, vcpu, pi, coff, bytes,
			    eax);
		} else {
			needcfg = 1;
		}

		if (needcfg)
			*eax = CFGREAD(pi, coff, bytes);

		pci_emul_hdrtype_fixup(bus, slot, coff, bytes, eax);
	} else {
		/* Let the device emulation override the default handler */
		if (pe->pe_cfgwrite != NULL &&
		    (*pe->pe_cfgwrite)(ctx, vcpu, pi, coff, bytes, *eax) == 0)
			return;

		/*
		 * Special handling for write to BAR registers
		 */
		if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)) {
			/*
			 * Ignore writes to BAR registers that are not
			 * 4-byte aligned.
			 */
			if (bytes != 4 || (coff & 0x3) != 0)
				return;
			idx = (coff - PCIR_BAR(0)) / 4;
			/* Clear the low bits so the value is size-aligned. */
			mask = ~(pi->pi_bar[idx].size - 1);
			switch (pi->pi_bar[idx].type) {
			case PCIBAR_NONE:
				pi->pi_bar[idx].addr = bar = 0;
				break;
			case PCIBAR_IO:
				addr = *eax & mask;
				addr &= 0xffff;
				bar = addr | PCIM_BAR_IO_SPACE;
				/*
				 * Register the new BAR value for interception
				 */
				if (addr != pi->pi_bar[idx].addr) {
					update_bar_address(pi, addr, idx,
					    PCIBAR_IO);
				}
				break;
			case PCIBAR_MEM32:
				addr = bar = *eax & mask;
				bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32;
				if (addr != pi->pi_bar[idx].addr) {
					update_bar_address(pi, addr, idx,
					    PCIBAR_MEM32);
				}
				break;
			case PCIBAR_MEM64:
				addr = bar = *eax & mask;
				bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 |
				    PCIM_BAR_MEM_PREFETCH;
				/* Only the low 32 bits change here. */
				if (addr != (uint32_t)pi->pi_bar[idx].addr) {
					update_bar_address(pi, addr, idx,
					    PCIBAR_MEM64);
				}
				break;
			case PCIBAR_MEMHI64:
				/* Upper half of the preceding MEM64 BAR. */
				mask = ~(pi->pi_bar[idx - 1].size - 1);
				addr = ((uint64_t)*eax << 32) & mask;
				bar = addr >> 32;
				if (bar != pi->pi_bar[idx - 1].addr >> 32) {
					update_bar_address(pi, addr, idx - 1,
					    PCIBAR_MEMHI64);
				}
				break;
			default:
				assert(0);
			}
			pci_set_cfgdata32(pi, coff, bar);

		} else if (pci_emul_iscap(pi, coff)) {
			pci_emul_capwrite(pi, coff, bytes, *eax, 0, 0);
		} else if (coff >= PCIR_COMMAND && coff < PCIR_REVID) {
			pci_emul_cmdsts_write(pi, coff, *eax, bytes);
		} else {
			CFGWRITE(pi, coff, *eax, bytes);
		}
	}
}

/* State latched by the last write to the 0xCF8 config-address port. */
static int cfgenable, cfgbus, cfgslot, cfgfunc, cfgoff;

/*
 * I/O handler for the config-address port (0xCF8, config mechanism #1).
 * Writes latch the target bus/slot/func/offset; reads return them.
 */
static int
pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
    uint32_t *eax, void *arg)
{
	uint32_t x;

	if (bytes != 4) {
		/* Sub-dword accesses to 0xCF8 are not decoded. */
		if (in)
			*eax = (bytes == 2) ? 0xffff : 0xff;
		return (0);
	}

	if (in) {
		x = (cfgbus << 16) | (cfgslot << 11) | (cfgfunc << 8) | cfgoff;
		if (cfgenable)
			x |= CONF1_ENABLE;
		*eax = x;
	} else {
		x = *eax;
		cfgenable = (x & CONF1_ENABLE) == CONF1_ENABLE;
		cfgoff = x & PCI_REGMAX;
		cfgfunc = (x >> 8) & PCI_FUNCMAX;
		cfgslot = (x >> 11) & PCI_SLOTMAX;
		cfgbus = (x >> 16) & PCI_BUSMAX;
	}

	return (0);
}
INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_INOUT, pci_emul_cfgaddr);

/*
 * I/O handler for the config-data ports (0xCFC-0xCFF).  The port offset
 * within the dword is added to the latched register offset.
 */
static int
pci_emul_cfgdata(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
    uint32_t *eax, void *arg)
{
	int coff;

	assert(bytes == 1 || bytes == 2 || bytes == 4);

	coff = cfgoff + (port - CONF1_DATA_PORT);
	if (cfgenable) {
		pci_cfgrw(ctx, vcpu, in, cfgbus, cfgslot, cfgfunc, coff, bytes,
		    eax);
	} else {
		/* Ignore accesses to cfgdata if not enabled by cfgaddr */
		if (in)
			*eax = 0xffffffff;
	}
	return (0);
}

INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata);
1975 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata); 1976 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata); 1977 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata); 1978 1979 #define PCI_EMUL_TEST 1980 #ifdef PCI_EMUL_TEST 1981 /* 1982 * Define a dummy test device 1983 */ 1984 #define DIOSZ 8 1985 #define DMEMSZ 4096 1986 struct pci_emul_dsoftc { 1987 uint8_t ioregs[DIOSZ]; 1988 uint8_t memregs[2][DMEMSZ]; 1989 }; 1990 1991 #define PCI_EMUL_MSI_MSGS 4 1992 #define PCI_EMUL_MSIX_MSGS 16 1993 1994 static int 1995 pci_emul_dinit(struct vmctx *ctx, struct pci_devinst *pi, char *opts) 1996 { 1997 int error; 1998 struct pci_emul_dsoftc *sc; 1999 2000 sc = calloc(1, sizeof(struct pci_emul_dsoftc)); 2001 2002 pi->pi_arg = sc; 2003 2004 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0001); 2005 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x10DD); 2006 pci_set_cfgdata8(pi, PCIR_CLASS, 0x02); 2007 2008 error = pci_emul_add_msicap(pi, PCI_EMUL_MSI_MSGS); 2009 assert(error == 0); 2010 2011 error = pci_emul_alloc_bar(pi, 0, PCIBAR_IO, DIOSZ); 2012 assert(error == 0); 2013 2014 error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, DMEMSZ); 2015 assert(error == 0); 2016 2017 error = pci_emul_alloc_bar(pi, 2, PCIBAR_MEM32, DMEMSZ); 2018 assert(error == 0); 2019 2020 return (0); 2021 } 2022 2023 static void 2024 pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 2025 uint64_t offset, int size, uint64_t value) 2026 { 2027 int i; 2028 struct pci_emul_dsoftc *sc = pi->pi_arg; 2029 2030 if (baridx == 0) { 2031 if (offset + size > DIOSZ) { 2032 printf("diow: iow too large, offset %ld size %d\n", 2033 offset, size); 2034 return; 2035 } 2036 2037 if (size == 1) { 2038 sc->ioregs[offset] = value & 0xff; 2039 } else if (size == 2) { 2040 *(uint16_t *)&sc->ioregs[offset] = value & 0xffff; 2041 } else if (size == 4) { 2042 *(uint32_t *)&sc->ioregs[offset] = value; 2043 } else { 2044 printf("diow: iow unknown 
size %d\n", size); 2045 } 2046 2047 /* 2048 * Special magic value to generate an interrupt 2049 */ 2050 if (offset == 4 && size == 4 && pci_msi_enabled(pi)) 2051 pci_generate_msi(pi, value % pci_msi_maxmsgnum(pi)); 2052 2053 if (value == 0xabcdef) { 2054 for (i = 0; i < pci_msi_maxmsgnum(pi); i++) 2055 pci_generate_msi(pi, i); 2056 } 2057 } 2058 2059 if (baridx == 1 || baridx == 2) { 2060 if (offset + size > DMEMSZ) { 2061 printf("diow: memw too large, offset %ld size %d\n", 2062 offset, size); 2063 return; 2064 } 2065 2066 i = baridx - 1; /* 'memregs' index */ 2067 2068 if (size == 1) { 2069 sc->memregs[i][offset] = value; 2070 } else if (size == 2) { 2071 *(uint16_t *)&sc->memregs[i][offset] = value; 2072 } else if (size == 4) { 2073 *(uint32_t *)&sc->memregs[i][offset] = value; 2074 } else if (size == 8) { 2075 *(uint64_t *)&sc->memregs[i][offset] = value; 2076 } else { 2077 printf("diow: memw unknown size %d\n", size); 2078 } 2079 2080 /* 2081 * magic interrupt ?? 2082 */ 2083 } 2084 2085 if (baridx > 2 || baridx < 0) { 2086 printf("diow: unknown bar idx %d\n", baridx); 2087 } 2088 } 2089 2090 static uint64_t 2091 pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 2092 uint64_t offset, int size) 2093 { 2094 struct pci_emul_dsoftc *sc = pi->pi_arg; 2095 uint32_t value; 2096 int i; 2097 2098 value = 0; 2099 if (baridx == 0) { 2100 if (offset + size > DIOSZ) { 2101 printf("dior: ior too large, offset %ld size %d\n", 2102 offset, size); 2103 return (0); 2104 } 2105 2106 value = 0; 2107 if (size == 1) { 2108 value = sc->ioregs[offset]; 2109 } else if (size == 2) { 2110 value = *(uint16_t *) &sc->ioregs[offset]; 2111 } else if (size == 4) { 2112 value = *(uint32_t *) &sc->ioregs[offset]; 2113 } else { 2114 printf("dior: ior unknown size %d\n", size); 2115 } 2116 } 2117 2118 if (baridx == 1 || baridx == 2) { 2119 if (offset + size > DMEMSZ) { 2120 printf("dior: memr too large, offset %ld size %d\n", 2121 offset, size); 2122 return (0); 2123 
} 2124 2125 i = baridx - 1; /* 'memregs' index */ 2126 2127 if (size == 1) { 2128 value = sc->memregs[i][offset]; 2129 } else if (size == 2) { 2130 value = *(uint16_t *) &sc->memregs[i][offset]; 2131 } else if (size == 4) { 2132 value = *(uint32_t *) &sc->memregs[i][offset]; 2133 } else if (size == 8) { 2134 value = *(uint64_t *) &sc->memregs[i][offset]; 2135 } else { 2136 printf("dior: ior unknown size %d\n", size); 2137 } 2138 } 2139 2140 2141 if (baridx > 2 || baridx < 0) { 2142 printf("dior: unknown bar idx %d\n", baridx); 2143 return (0); 2144 } 2145 2146 return (value); 2147 } 2148 2149 struct pci_devemu pci_dummy = { 2150 .pe_emu = "dummy", 2151 .pe_init = pci_emul_dinit, 2152 .pe_barwrite = pci_emul_diow, 2153 .pe_barread = pci_emul_dior 2154 }; 2155 PCI_EMUL_SET(pci_dummy); 2156 2157 #endif /* PCI_EMUL_TEST */ 2158